This is the result of automatically editing source files like this:
    go install golang.org/x/tools/cmd/goimports@latest
    find ./test/e2e* -name "*.go" | xargs env PATH=$GOPATH/bin:$PATH ./e2e-framework-sed.sh
with e2e-framework-sed.sh containing this:
    sed -i \
        -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecCommandInContainer(/e2epod.ExecCommandInContainer(\1, /" \
        -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecCommandInContainerWithFullOutput(/e2epod.ExecCommandInContainerWithFullOutput(\1, /" \
        -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInContainer(/e2epod.ExecShellInContainer(\1, /" \
        -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInPod(/e2epod.ExecShellInPod(\1, /" \
        -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInPodWithFullOutput(/e2epod.ExecShellInPodWithFullOutput(\1, /" \
        -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecWithOptions(/e2epod.ExecWithOptions(\1, /" \
        -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.MatchContainerOutput(/e2eoutput.MatchContainerOutput(\1, /" \
        -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.PodClient(/e2epod.NewPodClient(\1, /" \
        -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.PodClientNS(/e2epod.PodClientNS(\1, /" \
        -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.TestContainerOutput(/e2eoutput.TestContainerOutput(\1, /" \
        -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.TestContainerOutputRegexp(/e2eoutput.TestContainerOutputRegexp(\1, /" \
        -e "s/framework.AddOrUpdateLabelOnNode\b/e2enode.AddOrUpdateLabelOnNode/" \
        -e "s/framework.AllNodes\b/e2edebug.AllNodes/" \
        -e "s/framework.AllNodesReady\b/e2enode.AllNodesReady/" \
        -e "s/framework.ContainerResourceGatherer\b/e2edebug.ContainerResourceGatherer/" \
        -e "s/framework.ContainerResourceUsage\b/e2edebug.ContainerResourceUsage/" \
        -e "s/framework.CreateEmptyFileOnPod\b/e2eoutput.CreateEmptyFileOnPod/" \
        -e "s/framework.DefaultPodDeletionTimeout\b/e2epod.DefaultPodDeletionTimeout/" \
        -e "s/framework.DumpAllNamespaceInfo\b/e2edebug.DumpAllNamespaceInfo/" \
        -e "s/framework.DumpDebugInfo\b/e2eoutput.DumpDebugInfo/" \
        -e "s/framework.DumpNodeDebugInfo\b/e2edebug.DumpNodeDebugInfo/" \
        -e "s/framework.EtcdUpgrade\b/e2eproviders.EtcdUpgrade/" \
        -e "s/framework.EventsLister\b/e2edebug.EventsLister/" \
        -e "s/framework.ExecOptions\b/e2epod.ExecOptions/" \
        -e "s/framework.ExpectNodeHasLabel\b/e2enode.ExpectNodeHasLabel/" \
        -e "s/framework.ExpectNodeHasTaint\b/e2enode.ExpectNodeHasTaint/" \
        -e "s/framework.GCEUpgradeScript\b/e2eproviders.GCEUpgradeScript/" \
        -e "s/framework.ImagePrePullList\b/e2epod.ImagePrePullList/" \
        -e "s/framework.KubectlBuilder\b/e2ekubectl.KubectlBuilder/" \
        -e "s/framework.LocationParamGKE\b/e2eproviders.LocationParamGKE/" \
        -e "s/framework.LogSizeDataTimeseries\b/e2edebug.LogSizeDataTimeseries/" \
        -e "s/framework.LogSizeGatherer\b/e2edebug.LogSizeGatherer/" \
        -e "s/framework.LogsSizeData\b/e2edebug.LogsSizeData/" \
        -e "s/framework.LogsSizeDataSummary\b/e2edebug.LogsSizeDataSummary/" \
        -e "s/framework.LogsSizeVerifier\b/e2edebug.LogsSizeVerifier/" \
        -e "s/framework.LookForStringInLog\b/e2eoutput.LookForStringInLog/" \
        -e "s/framework.LookForStringInPodExec\b/e2eoutput.LookForStringInPodExec/" \
        -e "s/framework.LookForStringInPodExecToContainer\b/e2eoutput.LookForStringInPodExecToContainer/" \
        -e "s/framework.MasterAndDNSNodes\b/e2edebug.MasterAndDNSNodes/" \
        -e "s/framework.MasterNodes\b/e2edebug.MasterNodes/" \
        -e "s/framework.MasterUpgradeGKE\b/e2eproviders.MasterUpgradeGKE/" \
        -e "s/framework.NewKubectlCommand\b/e2ekubectl.NewKubectlCommand/" \
        -e "s/framework.NewLogsVerifier\b/e2edebug.NewLogsVerifier/" \
        -e "s/framework.NewNodeKiller\b/e2enode.NewNodeKiller/" \
        -e "s/framework.NewResourceUsageGatherer\b/e2edebug.NewResourceUsageGatherer/" \
        -e "s/framework.NodeHasTaint\b/e2enode.NodeHasTaint/" \
        -e "s/framework.NodeKiller\b/e2enode.NodeKiller/" \
        -e "s/framework.NodesSet\b/e2edebug.NodesSet/" \
        -e "s/framework.PodClient\b/e2epod.PodClient/" \
        -e "s/framework.RemoveLabelOffNode\b/e2enode.RemoveLabelOffNode/" \
        -e "s/framework.ResourceConstraint\b/e2edebug.ResourceConstraint/" \
        -e "s/framework.ResourceGathererOptions\b/e2edebug.ResourceGathererOptions/" \
        -e "s/framework.ResourceUsagePerContainer\b/e2edebug.ResourceUsagePerContainer/" \
        -e "s/framework.ResourceUsageSummary\b/e2edebug.ResourceUsageSummary/" \
        -e "s/framework.RunHostCmd\b/e2eoutput.RunHostCmd/" \
        -e "s/framework.RunHostCmdOrDie\b/e2eoutput.RunHostCmdOrDie/" \
        -e "s/framework.RunHostCmdWithFullOutput\b/e2eoutput.RunHostCmdWithFullOutput/" \
        -e "s/framework.RunHostCmdWithRetries\b/e2eoutput.RunHostCmdWithRetries/" \
        -e "s/framework.RunKubectl\b/e2ekubectl.RunKubectl/" \
        -e "s/framework.RunKubectlInput\b/e2ekubectl.RunKubectlInput/" \
        -e "s/framework.RunKubectlOrDie\b/e2ekubectl.RunKubectlOrDie/" \
        -e "s/framework.RunKubectlOrDieInput\b/e2ekubectl.RunKubectlOrDieInput/" \
        -e "s/framework.RunKubectlWithFullOutput\b/e2ekubectl.RunKubectlWithFullOutput/" \
        -e "s/framework.RunKubemciCmd\b/e2ekubectl.RunKubemciCmd/" \
        -e "s/framework.RunKubemciWithKubeconfig\b/e2ekubectl.RunKubemciWithKubeconfig/" \
        -e "s/framework.SingleContainerSummary\b/e2edebug.SingleContainerSummary/" \
        -e "s/framework.SingleLogSummary\b/e2edebug.SingleLogSummary/" \
        -e "s/framework.TimestampedSize\b/e2edebug.TimestampedSize/" \
        -e "s/framework.WaitForAllNodesSchedulable\b/e2enode.WaitForAllNodesSchedulable/" \
        -e "s/framework.WaitForSSHTunnels\b/e2enode.WaitForSSHTunnels/" \
        -e "s/framework.WorkItem\b/e2edebug.WorkItem/" \
        "$@"

    for i in "$@"; do
        # Import all sub packages and let goimports figure out which of those
        # are redundant (= already imported) or not needed.
        sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"' "$i"
        sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"' "$i"
        sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2enode "k8s.io/kubernetes/test/e2e/framework/node"' "$i"
        sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"' "$i"
        sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2epod "k8s.io/kubernetes/test/e2e/framework/pod"' "$i"
        sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2eproviders "k8s.io/kubernetes/test/e2e/framework/providers"' "$i"
        goimports -w "$i"
    done
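The first group of rules rewrites framework method calls into calls to standalone helpers, with the back-reference \1 turning the receiver into the first argument. A minimal before/after sketch (hypothetical call site; the arguments stand in for whatever a real test passes):

    // before
    f.ExecCommandInContainer(podName, containerName, "ls")
    // after
    e2epod.ExecCommandInContainer(f, podName, containerName, "ls")

The loop then appends every sub-package import after the existing framework import line (the dots in the sed address pattern match the slashes in the import path) and relies on goimports to drop the imports a file does not actually use.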
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
	"context"
	"fmt"
	"strconv"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
)

var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() {
	f := framework.NewDefaultFramework("volume-placement")
	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
	const (
		NodeLabelKey = "vsphere_e2e_label_volume_placement"
	)
	var (
		c                  clientset.Interface
		ns                 string
		volumePaths        []string
		node1Name          string
		node1KeyValueLabel map[string]string
		node2Name          string
		node2KeyValueLabel map[string]string
		nodeInfo           *NodeInfo
		vsp                *VSphere
	)

	ginkgo.BeforeEach(func() {
		e2eskipper.SkipUnlessProviderIs("vsphere")
		Bootstrap(f)
		c = f.ClientSet
		ns = f.Namespace.Name
		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
		node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns)
		ginkgo.DeferCleanup(func() {
			if len(node1KeyValueLabel) > 0 {
				e2enode.RemoveLabelOffNode(c, node1Name, NodeLabelKey)
			}
			if len(node2KeyValueLabel) > 0 {
				e2enode.RemoveLabelOffNode(c, node2Name, NodeLabelKey)
			}
		})
		nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
		vsp = nodeInfo.VSphere
		ginkgo.By("creating vmdk")
		volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
		framework.ExpectNoError(err)
		volumePaths = append(volumePaths, volumePath)
		ginkgo.DeferCleanup(func() {
			for _, volumePath := range volumePaths {
				vsp.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
			}
			volumePaths = nil
		})
	})

	/*
		Steps

		1. Create pod Spec with volume path of the vmdk and NodeSelector set to label assigned to node1.
		2. Create pod and wait for pod to become ready.
		3. Verify volume is attached to node1.
		4. Create empty file on the volume to verify volume is writable.
		5. Verify newly created file and previously created files exist on the volume.
		6. Delete pod.
		7. Wait for volume to be detached from node1.
		8. Repeat steps 1 to 7 and make sure back-to-back pod creation on the same worker node with the same volume works as expected.
	*/
	ginkgo.It("should create and delete pod with the same volume source on the same worker node", func() {
		var volumeFiles []string
		pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)

		// Create empty files on the mounted volumes on the pod to verify volume is writable
		// Verify newly and previously created files are present on the volume mounted on the pod
		newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
		volumeFiles = append(volumeFiles, newEmptyFileName)
		createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)

		ginkgo.By(fmt.Sprintf("Creating pod on the same node: %v", node1Name))
		pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)

		// Create empty files on the mounted volumes on the pod to verify volume is writable
		// Verify newly and previously created files are present on the volume mounted on the pod
		newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
		volumeFiles = append(volumeFiles, newEmptyFileName)
		createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
	})

	/*
		Steps

		1. Create pod Spec with volume path of the vmdk1 and NodeSelector set to node1's label.
		2. Create pod and wait for pod to become ready.
		3. Verify volume is attached to node1.
		4. Create empty file on the volume to verify volume is writable.
		5. Verify newly created file and previously created files exist on the volume.
		6. Delete pod.
		7. Wait for volume to be detached from node1.
		8. Create pod Spec with volume path of the vmdk1 and NodeSelector set to node2's label.
		9. Create pod and wait for pod to become ready.
		10. Verify volume is attached to node2.
		11. Create empty file on the volume to verify volume is writable.
		12. Verify newly created file and previously created files exist on the volume.
		13. Delete pod.
	*/
	ginkgo.It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() {
		var volumeFiles []string
		pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
		// Create empty files on the mounted volumes on the pod to verify volume is writable
		// Verify newly and previously created files are present on the volume mounted on the pod
		newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
		volumeFiles = append(volumeFiles, newEmptyFileName)
		createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)

		ginkgo.By(fmt.Sprintf("Creating pod on another node: %v", node2Name))
		pod = createPodWithVolumeAndNodeSelector(c, ns, node2Name, node2KeyValueLabel, volumePaths)

		newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
		volumeFiles = append(volumeFiles, newEmptyFileName)
		// Create empty files on the mounted volumes on the pod to verify volume is writable
		// Verify newly and previously created files are present on the volume mounted on the pod
		createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node2Name, volumePaths)
	})

	/*
		Test multiple volumes from the same datastore within the same pod
		1. Create volumes - vmdk2
		2. Create pod Spec with volume path of vmdk1 (vmdk1 is created in test setup) and vmdk2.
		3. Create pod using spec created in step-2 and wait for pod to become ready.
		4. Verify both volumes are attached to the node on which the pod is created. Write some data to make sure the volumes are accessible.
		5. Delete pod.
		6. Wait for vmdk1 and vmdk2 to be detached from node.
		7. Create pod using spec created in step-2 and wait for pod to become ready.
		8. Verify both volumes are attached to the node on which the pod is created. Verify volume contents match the content written in step 4.
		9. Delete pod.
		10. Wait for vmdk1 and vmdk2 to be detached from node.
	*/
	ginkgo.It("should create and delete pod with multiple volumes from same datastore", func() {
		ginkgo.By("creating another vmdk")
		volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
		framework.ExpectNoError(err)
		volumePaths = append(volumePaths, volumePath)

		ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
		pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
		// Create empty files on the mounted volumes on the pod to verify volume is writable
		// Verify newly and previously created files are present on the volume mounted on the pod
		volumeFiles := []string{
			fmt.Sprintf("/mnt/volume1/%v_1.txt", ns),
			fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
		}
		createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)

		ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
		pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
		// Create empty files on the mounted volumes on the pod to verify volume is writable
		// Verify newly and previously created files are present on the volume mounted on the pod
		newEmptyFilesNames := []string{
			fmt.Sprintf("/mnt/volume1/%v_2.txt", ns),
			fmt.Sprintf("/mnt/volume2/%v_2.txt", ns),
		}
		volumeFiles = append(volumeFiles, newEmptyFilesNames[0])
		volumeFiles = append(volumeFiles, newEmptyFilesNames[1])
		createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFilesNames, volumeFiles)
	})

	/*
		Test multiple volumes from different datastores within the same pod
		1. Create volumes - vmdk2 on a non-default shared datastore.
		2. Create pod Spec with volume path of vmdk1 (vmdk1 is created in test setup on the default datastore) and vmdk2.
		3. Create pod using spec created in step-2 and wait for pod to become ready.
		4. Verify both volumes are attached to the node on which the pod is created. Write some data to make sure the volumes are accessible.
		5. Delete pod.
		6. Wait for vmdk1 and vmdk2 to be detached from node.
		7. Create pod using spec created in step-2 and wait for pod to become ready.
		8. Verify both volumes are attached to the node on which the pod is created. Verify volume contents match the content written in step 4.
		9. Delete pod.
		10. Wait for vmdk1 and vmdk2 to be detached from node.
	*/
	ginkgo.It("should create and delete pod with multiple volumes from different datastore", func() {
		ginkgo.By("creating another vmdk on non default shared datastore")
		volumeOptions := new(VolumeOptions)
		volumeOptions.CapacityKB = 2097152
		volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
		volumeOptions.Datastore = GetAndExpectStringEnvVar(SecondSharedDatastore)
		volumePath, err := vsp.CreateVolume(volumeOptions, nodeInfo.DataCenterRef)
		framework.ExpectNoError(err)
		volumePaths = append(volumePaths, volumePath)

		ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
		pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)

		// Create empty files on the mounted volumes on the pod to verify volume is writable
		// Verify newly and previously created files are present on the volume mounted on the pod
		volumeFiles := []string{
			fmt.Sprintf("/mnt/volume1/%v_1.txt", ns),
			fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
		}
		createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)

		ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
		pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
		// Create empty files on the mounted volumes on the pod to verify volume is writable
		// Verify newly and previously created files are present on the volume mounted on the pod
		newEmptyFileNames := []string{
			fmt.Sprintf("/mnt/volume1/%v_2.txt", ns),
			fmt.Sprintf("/mnt/volume2/%v_2.txt", ns),
		}
		volumeFiles = append(volumeFiles, newEmptyFileNames[0])
		volumeFiles = append(volumeFiles, newEmptyFileNames[1])
		createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFileNames, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
	})

	/*
		Test back-to-back pod creation/deletion with different volume sources on the same worker node
		1. Create volumes - vmdk2
		2. Create pod Spec - pod-SpecA with volume path of vmdk1 and NodeSelector set to label assigned to node1.
		3. Create pod Spec - pod-SpecB with volume path of vmdk2 and NodeSelector set to label assigned to node1.
		4. Create pod-A using pod-SpecA and wait for pod to become ready.
		5. Create pod-B using pod-SpecB and wait for pod to become ready.
		6. Verify volumes are attached to the node.
		7. Create empty file on the volume to make sure volume is accessible. (Perform this step on pod-A and pod-B.)
		8. Verify file created in step 7 is present on the volume. (Perform this step on pod-A and pod-B.)
		9. Delete pod-A and pod-B.
		10. Repeatedly (5 times) perform steps 4 to 9 and verify the associated volume's content matches.
		11. Wait for vmdk1 and vmdk2 to be detached from node.
	*/
	ginkgo.It("test back to back pod creation and deletion with different volume sources on the same worker node", func() {
		var (
			podA                *v1.Pod
			podB                *v1.Pod
			testvolumePathsPodA []string
			testvolumePathsPodB []string
			podAFiles           []string
			podBFiles           []string
		)

		defer func() {
			ginkgo.By("clean up undeleted pods")
			framework.ExpectNoError(e2epod.DeletePodWithWait(c, podA), "defer: Failed to delete pod ", podA.Name)
			framework.ExpectNoError(e2epod.DeletePodWithWait(c, podB), "defer: Failed to delete pod ", podB.Name)
			ginkgo.By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name))
			for _, volumePath := range volumePaths {
				framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node1Name))
			}
		}()

		testvolumePathsPodA = append(testvolumePathsPodA, volumePaths[0])
		// Create another VMDK Volume
		ginkgo.By("creating another vmdk")
		volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
		framework.ExpectNoError(err)
		volumePaths = append(volumePaths, volumePath)
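		// Seed pod-B's volume list with the second vmdk so pod-A and pod-B use different volume sources (see step 3 above).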
		testvolumePathsPodB = append(testvolumePathsPodB, volumePath)

		for index := 0; index < 5; index++ {
			ginkgo.By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0]))
			podA = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodA)

			ginkgo.By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0]))
			podB = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodB)

			podAFileName := fmt.Sprintf("/mnt/volume1/podA_%v_%v.txt", ns, index+1)
			podBFileName := fmt.Sprintf("/mnt/volume1/podB_%v_%v.txt", ns, index+1)
			podAFiles = append(podAFiles, podAFileName)
			podBFiles = append(podBFiles, podBFileName)

			// Create empty files on the mounted volumes on the pod to verify volume is writable
			ginkgo.By("Creating empty file on volume mounted on pod-A")
			e2eoutput.CreateEmptyFileOnPod(ns, podA.Name, podAFileName)

			ginkgo.By("Creating empty file on volume mounted on pod-B")
			e2eoutput.CreateEmptyFileOnPod(ns, podB.Name, podBFileName)

			// Verify newly and previously created files are present on the volume mounted on the pod
			ginkgo.By("Verify newly created file and previously created files are present on volume mounted on pod-A")
			verifyFilesExistOnVSphereVolume(ns, podA.Name, podAFiles...)
			ginkgo.By("Verify newly created file and previously created files are present on volume mounted on pod-B")
			verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles...)

			ginkgo.By("Deleting pod-A")
			framework.ExpectNoError(e2epod.DeletePodWithWait(c, podA), "Failed to delete pod ", podA.Name)
			ginkgo.By("Deleting pod-B")
			framework.ExpectNoError(e2epod.DeletePodWithWait(c, podB), "Failed to delete pod ", podB.Name)
		}
	})
})

func testSetupVolumePlacement(client clientset.Interface, namespace string) (node1Name string, node1KeyValueLabel map[string]string, node2Name string, node2KeyValueLabel map[string]string) {
	nodes, err := e2enode.GetBoundedReadySchedulableNodes(client, 2)
	framework.ExpectNoError(err)
	if len(nodes.Items) < 2 {
		e2eskipper.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
	}
	node1Name = nodes.Items[0].Name
	node2Name = nodes.Items[1].Name
	node1LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
	node1KeyValueLabel = make(map[string]string)
	node1KeyValueLabel[NodeLabelKey] = node1LabelValue
	e2enode.AddOrUpdateLabelOnNode(client, node1Name, NodeLabelKey, node1LabelValue)

	node2LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
	node2KeyValueLabel = make(map[string]string)
	node2KeyValueLabel[NodeLabelKey] = node2LabelValue
	e2enode.AddOrUpdateLabelOnNode(client, node2Name, NodeLabelKey, node2LabelValue)
	return node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel
}

func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod {
	ginkgo.By(fmt.Sprintf("Creating pod on the node: %v", nodeName))
	podspec := getVSpherePodSpecWithVolumePaths(volumePaths, nodeKeyValueLabel, nil)

	pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podspec, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	ginkgo.By("Waiting for pod to be ready")
	gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed())

	ginkgo.By(fmt.Sprintf("Verify volume is attached to the node: %v", nodeName))
	for _, volumePath := range volumePaths {
		isAttached, err := diskIsAttached(volumePath, nodeName)
		framework.ExpectNoError(err)
		if !isAttached {
			framework.Failf("Volume: %s is not attached to the node: %v", volumePath, nodeName)
		}
	}
	return pod
}

func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyfilesToCreate []string, filesToCheck []string) {
	// Create empty files on the mounted volumes on the pod to verify volume is writable
	ginkgo.By(fmt.Sprintf("Creating empty file on volume mounted on: %v", podname))
	createEmptyFilesOnVSphereVolume(namespace, podname, newEmptyfilesToCreate)

	// Verify newly and previously created files are present on the volume mounted on the pod
	ginkgo.By(fmt.Sprintf("Verify newly created file and previously created files are present on volume mounted on: %v", podname))
	verifyFilesExistOnVSphereVolume(namespace, podname, filesToCheck...)
}

func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) {
	ginkgo.By("Deleting pod")
	framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod), "Failed to delete pod ", pod.Name)

	ginkgo.By("Waiting for volume to be detached from the node")
	for _, volumePath := range volumePaths {
		framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, nodeName))
	}
}
|