This is the result of automatically editing source files like this:
go install golang.org/x/tools/cmd/goimports@latest
find ./test/e2e* -name "*.go" | xargs env PATH=$GOPATH/bin:$PATH ./e2e-framework-sed.sh
with e2e-framework-sed.sh containing this:
sed -i \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecCommandInContainer(/e2epod.ExecCommandInContainer(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecCommandInContainerWithFullOutput(/e2epod.ExecCommandInContainerWithFullOutput(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInContainer(/e2epod.ExecShellInContainer(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInPod(/e2epod.ExecShellInPod(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInPodWithFullOutput(/e2epod.ExecShellInPodWithFullOutput(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecWithOptions(/e2epod.ExecWithOptions(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.MatchContainerOutput(/e2eoutput.MatchContainerOutput(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.PodClient(/e2epod.NewPodClient(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.PodClientNS(/e2epod.PodClientNS(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.TestContainerOutput(/e2eoutput.TestContainerOutput(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.TestContainerOutputRegexp(/e2eoutput.TestContainerOutputRegexp(\1, /" \
-e "s/framework.AddOrUpdateLabelOnNode\b/e2enode.AddOrUpdateLabelOnNode/" \
-e "s/framework.AllNodes\b/e2edebug.AllNodes/" \
-e "s/framework.AllNodesReady\b/e2enode.AllNodesReady/" \
-e "s/framework.ContainerResourceGatherer\b/e2edebug.ContainerResourceGatherer/" \
-e "s/framework.ContainerResourceUsage\b/e2edebug.ContainerResourceUsage/" \
-e "s/framework.CreateEmptyFileOnPod\b/e2eoutput.CreateEmptyFileOnPod/" \
-e "s/framework.DefaultPodDeletionTimeout\b/e2epod.DefaultPodDeletionTimeout/" \
-e "s/framework.DumpAllNamespaceInfo\b/e2edebug.DumpAllNamespaceInfo/" \
-e "s/framework.DumpDebugInfo\b/e2eoutput.DumpDebugInfo/" \
-e "s/framework.DumpNodeDebugInfo\b/e2edebug.DumpNodeDebugInfo/" \
-e "s/framework.EtcdUpgrade\b/e2eproviders.EtcdUpgrade/" \
-e "s/framework.EventsLister\b/e2edebug.EventsLister/" \
-e "s/framework.ExecOptions\b/e2epod.ExecOptions/" \
-e "s/framework.ExpectNodeHasLabel\b/e2enode.ExpectNodeHasLabel/" \
-e "s/framework.ExpectNodeHasTaint\b/e2enode.ExpectNodeHasTaint/" \
-e "s/framework.GCEUpgradeScript\b/e2eproviders.GCEUpgradeScript/" \
-e "s/framework.ImagePrePullList\b/e2epod.ImagePrePullList/" \
-e "s/framework.KubectlBuilder\b/e2ekubectl.KubectlBuilder/" \
-e "s/framework.LocationParamGKE\b/e2eproviders.LocationParamGKE/" \
-e "s/framework.LogSizeDataTimeseries\b/e2edebug.LogSizeDataTimeseries/" \
-e "s/framework.LogSizeGatherer\b/e2edebug.LogSizeGatherer/" \
-e "s/framework.LogsSizeData\b/e2edebug.LogsSizeData/" \
-e "s/framework.LogsSizeDataSummary\b/e2edebug.LogsSizeDataSummary/" \
-e "s/framework.LogsSizeVerifier\b/e2edebug.LogsSizeVerifier/" \
-e "s/framework.LookForStringInLog\b/e2eoutput.LookForStringInLog/" \
-e "s/framework.LookForStringInPodExec\b/e2eoutput.LookForStringInPodExec/" \
-e "s/framework.LookForStringInPodExecToContainer\b/e2eoutput.LookForStringInPodExecToContainer/" \
-e "s/framework.MasterAndDNSNodes\b/e2edebug.MasterAndDNSNodes/" \
-e "s/framework.MasterNodes\b/e2edebug.MasterNodes/" \
-e "s/framework.MasterUpgradeGKE\b/e2eproviders.MasterUpgradeGKE/" \
-e "s/framework.NewKubectlCommand\b/e2ekubectl.NewKubectlCommand/" \
-e "s/framework.NewLogsVerifier\b/e2edebug.NewLogsVerifier/" \
-e "s/framework.NewNodeKiller\b/e2enode.NewNodeKiller/" \
-e "s/framework.NewResourceUsageGatherer\b/e2edebug.NewResourceUsageGatherer/" \
-e "s/framework.NodeHasTaint\b/e2enode.NodeHasTaint/" \
-e "s/framework.NodeKiller\b/e2enode.NodeKiller/" \
-e "s/framework.NodesSet\b/e2edebug.NodesSet/" \
-e "s/framework.PodClient\b/e2epod.PodClient/" \
-e "s/framework.RemoveLabelOffNode\b/e2enode.RemoveLabelOffNode/" \
-e "s/framework.ResourceConstraint\b/e2edebug.ResourceConstraint/" \
-e "s/framework.ResourceGathererOptions\b/e2edebug.ResourceGathererOptions/" \
-e "s/framework.ResourceUsagePerContainer\b/e2edebug.ResourceUsagePerContainer/" \
-e "s/framework.ResourceUsageSummary\b/e2edebug.ResourceUsageSummary/" \
-e "s/framework.RunHostCmd\b/e2eoutput.RunHostCmd/" \
-e "s/framework.RunHostCmdOrDie\b/e2eoutput.RunHostCmdOrDie/" \
-e "s/framework.RunHostCmdWithFullOutput\b/e2eoutput.RunHostCmdWithFullOutput/" \
-e "s/framework.RunHostCmdWithRetries\b/e2eoutput.RunHostCmdWithRetries/" \
-e "s/framework.RunKubectl\b/e2ekubectl.RunKubectl/" \
-e "s/framework.RunKubectlInput\b/e2ekubectl.RunKubectlInput/" \
-e "s/framework.RunKubectlOrDie\b/e2ekubectl.RunKubectlOrDie/" \
-e "s/framework.RunKubectlOrDieInput\b/e2ekubectl.RunKubectlOrDieInput/" \
-e "s/framework.RunKubectlWithFullOutput\b/e2ekubectl.RunKubectlWithFullOutput/" \
-e "s/framework.RunKubemciCmd\b/e2ekubectl.RunKubemciCmd/" \
-e "s/framework.RunKubemciWithKubeconfig\b/e2ekubectl.RunKubemciWithKubeconfig/" \
-e "s/framework.SingleContainerSummary\b/e2edebug.SingleContainerSummary/" \
-e "s/framework.SingleLogSummary\b/e2edebug.SingleLogSummary/" \
-e "s/framework.TimestampedSize\b/e2edebug.TimestampedSize/" \
-e "s/framework.WaitForAllNodesSchedulable\b/e2enode.WaitForAllNodesSchedulable/" \
-e "s/framework.WaitForSSHTunnels\b/e2enode.WaitForSSHTunnels/" \
-e "s/framework.WorkItem\b/e2edebug.WorkItem/" \
"$@"
for i in "$@"; do
# Import all sub packages and let goimports figure out which of those
# are redundant (= already imported) or not needed.
sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"' "$i"
sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"' "$i"
sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2enode "k8s.io/kubernetes/test/e2e/framework/node"' "$i"
sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"' "$i"
sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2epod "k8s.io/kubernetes/test/e2e/framework/pod"' "$i"
sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2eproviders "k8s.io/kubernetes/test/e2e/framework/providers"' "$i"
goimports -w "$i"
done
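For illustration, here is a hypothetical call site (not part of this commit)
before the rewrite:

    out := f.ExecCommandInContainer(podName, containerName, "cat", "/etc/hostname")
    framework.ExpectNodeHasLabel(c, nodeName, "zone", "us-east-1a")

and after running the sed script plus goimports:

    out := e2epod.ExecCommandInContainer(f, podName, containerName, "cat", "/etc/hostname")
    e2enode.ExpectNodeHasLabel(c, nodeName, "zone", "us-east-1a")

goimports then keeps only those of the six inserted framework sub-package
imports that are actually referenced in each file.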
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
	"context"
	"fmt"
	"strconv"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
)

/*
Perform vSphere volume life cycle management at scale based on a user-configurable number of volumes.
The following actions will be performed as part of this test.

1. Create Storage Classes of 4 categories (Default, SC with Non-Default Datastore, SC with SPBM Policy, SC with VSAN Storage Capabilities).
2. Read VCP_SCALE_VOLUME_COUNT, VCP_SCALE_INSTANCES, VCP_SCALE_VOLUMES_PER_POD, VSPHERE_SPBM_POLICY_NAME, VSPHERE_DATASTORE from the system environment.
3. Launch VCP_SCALE_INSTANCES goroutines for creating VCP_SCALE_VOLUME_COUNT volumes. Each goroutine is responsible for creating and attaching VCP_SCALE_VOLUME_COUNT/VCP_SCALE_INSTANCES volumes.
4. Read VCP_SCALE_VOLUMES_PER_POD from the system environment. Each pod will have VCP_SCALE_VOLUMES_PER_POD volumes attached to it.
5. Once all the goroutines have completed, delete all the pods and volumes.
*/
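// As a worked example of the numbers above (hypothetical values, not
// defaults): with VCP_SCALE_VOLUME_COUNT=50, VCP_SCALE_INSTANCES=2 and
// VCP_SCALE_VOLUMES_PER_POD=5, each of the 2 goroutines creates and attaches
// 50/2 = 25 volumes, spread across 25/5 = 5 pods with 5 PVCs each.
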
const (
	NodeLabelKey = "vsphere_e2e_label"
)

// NodeSelector holds a node label key/value pair; each pod uses one of these
// pairs as its node selector so that it lands on a specific labeled node.
type NodeSelector struct {
	labelKey   string
	labelValue string
}

var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
	f := framework.NewDefaultFramework("vcp-at-scale")
	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged

	var (
		client            clientset.Interface
		namespace         string
		nodeSelectorList  []*NodeSelector
		volumeCount       int
		numberOfInstances int
		volumesPerPod     int
		policyName        string
		datastoreName     string
		nodeVolumeMapChan chan map[string][]string
		nodes             *v1.NodeList
		scNames           = []string{storageclass1, storageclass2, storageclass3, storageclass4}
	)

	ginkgo.BeforeEach(func() {
		e2eskipper.SkipUnlessProviderIs("vsphere")
		Bootstrap(f)
		client = f.ClientSet
		namespace = f.Namespace.Name
		nodeVolumeMapChan = make(chan map[string][]string)

		// Read the environment variables
		volumeCount = GetAndExpectIntEnvVar(VCPScaleVolumeCount)
		volumesPerPod = GetAndExpectIntEnvVar(VCPScaleVolumesPerPod)

		numberOfInstances = GetAndExpectIntEnvVar(VCPScaleInstances)
		if numberOfInstances > 5 {
			framework.Failf("Maximum 5 instances allowed, got instead: %v", numberOfInstances)
		}
		if numberOfInstances > volumeCount {
			framework.Failf("Number of instances: %v cannot be greater than volume count: %v", numberOfInstances, volumeCount)
		}

		policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
		datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)

		var err error
		nodes, err = e2enode.GetReadySchedulableNodes(client)
		framework.ExpectNoError(err)
		if len(nodes.Items) < 2 {
			e2eskipper.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
		}
		// Verify volume count specified by the user can be satisfied
		if volumeCount > volumesPerNode*len(nodes.Items) {
			e2eskipper.Skipf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), volumesPerNode*len(nodes.Items))
		}
		nodeSelectorList = createNodeLabels(client, namespace, nodes)
		ginkgo.DeferCleanup(func() {
			for _, node := range nodes.Items {
				e2enode.RemoveLabelOffNode(client, node.Name, NodeLabelKey)
			}
		})
	})

	ginkgo.It("vsphere scale tests", func() {
		var pvcClaimList []string
		nodeVolumeMap := make(map[string][]string)
		// Volumes will be provisioned with each of the different types of Storage Class
		scArrays := make([]*storagev1.StorageClass, len(scNames))
		for index, scname := range scNames {
			// Create vSphere Storage Class
			ginkgo.By(fmt.Sprintf("Creating Storage Class : %q", scname))
			var sc *storagev1.StorageClass
			scParams := make(map[string]string)
			var err error
			switch scname {
			case storageclass1:
				scParams = nil
			case storageclass2:
				scParams[PolicyHostFailuresToTolerate] = "1"
			case storageclass3:
				scParams[SpbmStoragePolicy] = policyName
			case storageclass4:
				scParams[Datastore] = datastoreName
			}
			sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(scname, scParams, nil, ""), metav1.CreateOptions{})
			framework.ExpectNoError(err, "Failed to create storage class")
			gomega.Expect(sc).NotTo(gomega.BeNil(), "Storage class is empty")
			defer client.StorageV1().StorageClasses().Delete(context.TODO(), scname, metav1.DeleteOptions{})
			scArrays[index] = sc
		}

		// Distribute the volumes evenly across the instances; the last
		// instance also picks up the remainder of the integer division.
		volumeCountPerInstance := volumeCount / numberOfInstances
		for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
			if instanceCount == numberOfInstances-1 {
				volumeCountPerInstance = volumeCount
			}
			volumeCount = volumeCount - volumeCountPerInstance
			go VolumeCreateAndAttach(client, f.Timeouts, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan)
		}

		// Get the list of all volumes attached to each node from the goroutines by reading the data from the channel
		for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
			for node, volumeList := range <-nodeVolumeMapChan {
				nodeVolumeMap[node] = append(nodeVolumeMap[node], volumeList...)
			}
		}
		podList, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
		framework.ExpectNoError(err, "Failed to list pods")
		for _, pod := range podList.Items {
			pvcClaimList = append(pvcClaimList, getClaimsForPod(&pod, volumesPerPod)...)
			ginkgo.By("Deleting pod")
			err = e2epod.DeletePodWithWait(client, &pod)
			framework.ExpectNoError(err)
		}
		ginkgo.By("Waiting for volumes to be detached from the node")
		err = waitForVSphereDisksToDetach(nodeVolumeMap)
		framework.ExpectNoError(err)

		for _, pvcClaim := range pvcClaimList {
			err = e2epv.DeletePersistentVolumeClaim(client, pvcClaim, namespace)
			framework.ExpectNoError(err)
		}
	})
})

// getClaimsForPod returns the PVC claim names referenced by the given pod.
// The result is sized for volumesPerPod; it assumes the pod has at most
// volumesPerPod volumes, and pods with fewer PVCs leave trailing entries empty.
func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string {
	pvcClaimList := make([]string, volumesPerPod)
	for i, volumespec := range pod.Spec.Volumes {
		if volumespec.PersistentVolumeClaim != nil {
			pvcClaimList[i] = volumespec.PersistentVolumeClaim.ClaimName
		}
	}
	return pvcClaimList
}

// VolumeCreateAndAttach performs create and attach operations of vSphere persistent volumes at scale
func VolumeCreateAndAttach(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, sc []*storagev1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
	defer ginkgo.GinkgoRecover()
	nodeVolumeMap := make(map[string][]string)
	nodeSelectorIndex := 0
	for index := 0; index < volumeCountPerInstance; index = index + volumesPerPod {
		// The final pod of this instance may get fewer than volumesPerPod claims.
		if (volumeCountPerInstance - index) < volumesPerPod {
			volumesPerPod = volumeCountPerInstance - index
		}
		pvclaims := make([]*v1.PersistentVolumeClaim, volumesPerPod)
		for i := 0; i < volumesPerPod; i++ {
			ginkgo.By("Creating PVC using the Storage Class")
			pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc[index%len(sc)]))
			framework.ExpectNoError(err)
			pvclaims[i] = pvclaim
		}

		ginkgo.By("Waiting for claim to be in bound phase")
		persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision)
		framework.ExpectNoError(err)

		ginkgo.By("Creating pod to attach PV to the node")
		nodeSelector := nodeSelectorList[nodeSelectorIndex%len(nodeSelectorList)]
		// Create pod to attach Volume to Node
		pod, err := e2epod.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "")
		framework.ExpectNoError(err)

		for _, pv := range persistentvolumes {
			nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)
		}
		ginkgo.By("Verify the volume is accessible and available in the pod")
		verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
		nodeSelectorIndex++
	}
	// The test body performs exactly numberOfInstances receives on the shared
	// channel, so sending once per instance is sufficient; closing the channel
	// here would panic any other instance that still has a result to send.
	nodeVolumeMapChan <- nodeVolumeMap
}

// createNodeLabels labels each node with a unique NodeLabelKey value and
// returns a matching node selector per node.
func createNodeLabels(client clientset.Interface, namespace string, nodes *v1.NodeList) []*NodeSelector {
	var nodeSelectorList []*NodeSelector
	for i, node := range nodes.Items {
		labelVal := "vsphere_e2e_" + strconv.Itoa(i)
		nodeSelector := &NodeSelector{
			labelKey:   NodeLabelKey,
			labelValue: labelVal,
		}
		nodeSelectorList = append(nodeSelectorList, nodeSelector)
		e2enode.AddOrUpdateLabelOnNode(client, node.Name, NodeLabelKey, labelVal)
	}
	return nodeSelectorList
}