Merge pull request #77532 from WanLinghao/perf_refactor

Refactor and clean up e2e framework utils. This patch handles the test/e2e/framework/perf_util.go file.
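The change is mechanical and repeats across every hunk below: call sites stop reaching into the catch-all framework package and instead import the perf and kubelet subpackages it was split into. A condensed sketch of the pattern, using the aliases the diff itself introduces (the surrounding code is illustrative only):

    // Before: perf helpers and kubelet resource types hang off the
    // catch-all framework package.
    import "k8s.io/kubernetes/test/e2e/framework"

    framework.PrintPerfData(p)
    var limits framework.ContainersCPUSummary

    // After: the same symbols come from focused subpackages, imported
    // under e2e-prefixed aliases.
    import (
        e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
        e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
    )

    e2eperf.PrintPerfData(p)
    var limits e2ekubelet.ContainersCPUSummary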
@@ -76,6 +76,8 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
+        "//test/e2e/framework/kubelet:go_default_library",
+        "//test/e2e/framework/perf:go_default_library",
         "//test/e2e/perftype:go_default_library",
         "//test/e2e_node/perftype:go_default_library",
         "//test/utils:go_default_library",
@@ -193,6 +195,8 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//test/e2e/framework/config:go_default_library",
+        "//test/e2e/framework/kubelet:go_default_library",
+        "//test/e2e/framework/perf:go_default_library",
         "//test/e2e/framework/testfiles:go_default_library",
         "//test/e2e/generated:go_default_library",
         "//vendor/github.com/onsi/ginkgo/config:go_default_library",
@@ -31,6 +31,7 @@ import (
     "k8s.io/kubernetes/test/e2e/framework"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
+    e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
     "k8s.io/kubernetes/test/e2e/perftype"
     nodeperftype "k8s.io/kubernetes/test/e2e_node/perftype"
 )
@@ -58,7 +59,7 @@ func dumpDataToFile(data interface{}, labels map[string]string, prefix string) {
 // as "cpu" and "memory". If an error occurs, no perf data will be logged.
 func logPerfData(p *perftype.PerfData, perfType string) {
     if framework.TestContext.ReportDir == "" {
-        framework.PrintPerfData(p)
+        e2eperf.PrintPerfData(p)
         return
     }
     dumpDataToFile(p, p.Labels, "performance-"+perfType)
@@ -71,7 +72,7 @@ func logPerfData(p *perftype.PerfData, perfType string) {
 func logDensityTimeSeries(rc *ResourceCollector, create, watch map[string]metav1.Time, testInfo map[string]string) {
     timeSeries := &nodeperftype.NodeTimeSeries{
         Labels:  testInfo,
-        Version: framework.CurrentKubeletPerfMetricsVersion,
+        Version: e2eperf.CurrentKubeletPerfMetricsVersion,
     }
     // Attach operation time series.
     timeSeries.OperationData = map[string][]int64{
@@ -108,7 +109,7 @@ func getCumulatedPodTimeSeries(timePerPod map[string]metav1.Time) []int64 {
 // getLatencyPerfData returns perf data of pod startup latency.
 func getLatencyPerfData(latency e2emetrics.LatencyMetric, testInfo map[string]string) *perftype.PerfData {
     return &perftype.PerfData{
-        Version: framework.CurrentKubeletPerfMetricsVersion,
+        Version: e2eperf.CurrentKubeletPerfMetricsVersion,
         DataItems: []perftype.DataItem{
             {
                 Data: map[string]float64{
@@ -131,7 +132,7 @@ func getLatencyPerfData(latency e2emetrics.LatencyMetric, testInfo map[string]st
 // getThroughputPerfData returns perf data of pod creation startup throughput.
 func getThroughputPerfData(batchLag time.Duration, e2eLags []e2emetrics.PodLatencyData, podsNr int, testInfo map[string]string) *perftype.PerfData {
     return &perftype.PerfData{
-        Version: framework.CurrentKubeletPerfMetricsVersion,
+        Version: e2eperf.CurrentKubeletPerfMetricsVersion,
         DataItems: []perftype.DataItem{
             {
                 Data: map[string]float64{
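For orientation, the helpers above all build the same versioned structure and hand it either to e2eperf.PrintPerfData (when no report directory is set) or to dumpDataToFile. A self-contained sketch of that data shape; the struct fields mirror k8s.io/kubernetes/test/e2e/perftype, while the version string, numbers, and labels here are placeholders:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // DataItem mirrors perftype.DataItem: one measurement series plus its unit.
    type DataItem struct {
        Data   map[string]float64 `json:"data"`
        Unit   string             `json:"unit"`
        Labels map[string]string  `json:"labels,omitempty"`
    }

    // PerfData mirrors perftype.PerfData: a versioned bundle of data items.
    type PerfData struct {
        Version   string            `json:"version"`
        DataItems []DataItem        `json:"dataItems"`
        Labels    map[string]string `json:"labels,omitempty"`
    }

    func main() {
        // Shaped like getThroughputPerfData's output: batch latency and
        // throughput for one pod-creation run, tagged with a schema version.
        p := PerfData{
            Version: "v1", // stand-in for e2eperf.CurrentKubeletPerfMetricsVersion
            DataItems: []DataItem{{
                Data: map[string]float64{
                    "batch":      9.5,  // placeholder: time to start the batch
                    "throughput": 1.05, // placeholder: pods started per unit time
                },
                Unit:   "mixed",
                Labels: map[string]string{"datatype": "throughput"},
            }},
            Labels: map[string]string{"node": "test-node"},
        }
        out, _ := json.MarshalIndent(p, "", "  ")
        fmt.Println(string(out))
    }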
@@ -35,6 +35,7 @@ import (
     kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
     kubemetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
     imageutils "k8s.io/kubernetes/test/utils/image"
@@ -76,13 +77,13 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
         {
             podsNr:   10,
             interval: 0 * time.Millisecond,
-            cpuLimits: framework.ContainersCPUSummary{
+            cpuLimits: e2ekubelet.ContainersCPUSummary{
                 kubeletstatsv1alpha1.SystemContainerKubelet: {0.50: 0.30, 0.95: 0.50},
                 kubeletstatsv1alpha1.SystemContainerRuntime: {0.50: 0.40, 0.95: 0.60},
             },
-            memLimits: framework.ResourceUsagePerContainer{
-                kubeletstatsv1alpha1.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024},
-                kubeletstatsv1alpha1.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 500 * 1024 * 1024},
+            memLimits: e2ekubelet.ResourceUsagePerContainer{
+                kubeletstatsv1alpha1.SystemContainerKubelet: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024},
+                kubeletstatsv1alpha1.SystemContainerRuntime: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 500 * 1024 * 1024},
             },
             // percentile limit of single pod startup latency
             podStartupLimits: e2emetrics.LatencyMetric{
@@ -223,13 +224,13 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
         {
             podsNr:   10,
             bgPodsNr: 50,
-            cpuLimits: framework.ContainersCPUSummary{
+            cpuLimits: e2ekubelet.ContainersCPUSummary{
                 kubeletstatsv1alpha1.SystemContainerKubelet: {0.50: 0.30, 0.95: 0.50},
                 kubeletstatsv1alpha1.SystemContainerRuntime: {0.50: 0.40, 0.95: 0.60},
             },
-            memLimits: framework.ResourceUsagePerContainer{
-                kubeletstatsv1alpha1.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024},
-                kubeletstatsv1alpha1.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 500 * 1024 * 1024},
+            memLimits: e2ekubelet.ResourceUsagePerContainer{
+                kubeletstatsv1alpha1.SystemContainerKubelet: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024},
+                kubeletstatsv1alpha1.SystemContainerRuntime: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 500 * 1024 * 1024},
             },
             podStartupLimits: e2emetrics.LatencyMetric{
                 Perc50: 5000 * time.Millisecond,
@@ -302,8 +303,8 @@ type densityTest struct {
     // API QPS limit
     APIQPSLimit int
     // performance limits
-    cpuLimits            framework.ContainersCPUSummary
-    memLimits            framework.ResourceUsagePerContainer
+    cpuLimits            e2ekubelet.ContainersCPUSummary
+    memLimits            e2ekubelet.ResourceUsagePerContainer
     podStartupLimits     e2emetrics.LatencyMetric
     podBatchStartupLimit time.Duration
 }
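The cpuLimits tables in these test cases decode through a nested map: container name to percentile to allowed CPU usage in cores. A minimal sketch with a trimmed stand-in for the e2ekubelet type:

    package main

    import "fmt"

    // ContainersCPUSummary: container name -> (percentile -> CPU in cores).
    // Trimmed stand-in for the type the diff moves into e2ekubelet.
    type ContainersCPUSummary map[string]map[float64]float64

    func main() {
        cpuLimits := ContainersCPUSummary{
            "kubelet": {0.50: 0.30, 0.95: 0.50},
            "runtime": {0.50: 0.40, 0.95: 0.60},
        }
        // An entry like {0.95: 0.50} reads: the 95th-percentile CPU usage
        // of this container must stay at or below half a core.
        fmt.Println(cpuLimits["kubelet"][0.95]) // 0.5
    }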
@@ -43,6 +43,7 @@ import (
     kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
     "k8s.io/kubernetes/pkg/util/procfs"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     "k8s.io/kubernetes/test/e2e_node/perftype"
@@ -69,7 +70,7 @@ type ResourceCollector struct {
     request *cadvisorapiv2.RequestOptions
 
     pollingInterval time.Duration
-    buffers         map[string][]*framework.ContainerResourceUsage
+    buffers         map[string][]*e2ekubelet.ContainerResourceUsage
     lock            sync.RWMutex
     stopCh          chan struct{}
 }
@@ -77,7 +78,7 @@ type ResourceCollector struct {
 // NewResourceCollector creates a resource collector object which collects
 // resource usage periodically from Cadvisor
 func NewResourceCollector(interval time.Duration) *ResourceCollector {
-    buffers := make(map[string][]*framework.ContainerResourceUsage)
+    buffers := make(map[string][]*e2ekubelet.ContainerResourceUsage)
     return &ResourceCollector{
         pollingInterval: interval,
         buffers:         buffers,
@@ -127,13 +128,13 @@ func (r *ResourceCollector) Reset() {
     r.lock.Lock()
     defer r.lock.Unlock()
     for _, name := range systemContainers {
-        r.buffers[name] = []*framework.ContainerResourceUsage{}
+        r.buffers[name] = []*e2ekubelet.ContainerResourceUsage{}
     }
 }
 
 // GetCPUSummary gets CPU usage in percentile.
-func (r *ResourceCollector) GetCPUSummary() framework.ContainersCPUSummary {
-    result := make(framework.ContainersCPUSummary)
+func (r *ResourceCollector) GetCPUSummary() e2ekubelet.ContainersCPUSummary {
+    result := make(e2ekubelet.ContainersCPUSummary)
     for key, name := range systemContainers {
         data := r.GetBasicCPUStats(name)
         result[key] = data
@@ -174,8 +175,8 @@ func (r *ResourceCollector) collectStats(oldStatsMap map[string]*cadvisorapiv2.C
     }
 }
 
 // computeContainerResourceUsage computes resource usage based on new data sample.
-func computeContainerResourceUsage(name string, oldStats, newStats *cadvisorapiv2.ContainerStats) *framework.ContainerResourceUsage {
-    return &framework.ContainerResourceUsage{
+func computeContainerResourceUsage(name string, oldStats, newStats *cadvisorapiv2.ContainerStats) *e2ekubelet.ContainerResourceUsage {
+    return &e2ekubelet.ContainerResourceUsage{
         Name:            name,
         Timestamp:       newStats.Timestamp,
         CPUUsageInCores: float64(newStats.Cpu.Usage.Total-oldStats.Cpu.Usage.Total) / float64(newStats.Timestamp.Sub(oldStats.Timestamp).Nanoseconds()),
@@ -187,10 +188,10 @@ func computeContainerResourceUsage(name string, oldStats, newStats *cadvisorapiv
     }
 }
 
 // GetLatest gets the latest resource usage from stats buffer.
-func (r *ResourceCollector) GetLatest() (framework.ResourceUsagePerContainer, error) {
+func (r *ResourceCollector) GetLatest() (e2ekubelet.ResourceUsagePerContainer, error) {
     r.lock.RLock()
     defer r.lock.RUnlock()
-    kubeletstatsv1alpha1 := make(framework.ResourceUsagePerContainer)
+    kubeletstatsv1alpha1 := make(e2ekubelet.ResourceUsagePerContainer)
     for key, name := range systemContainers {
         contStats, ok := r.buffers[name]
         if !ok || len(contStats) == 0 {
@@ -201,7 +202,7 @@ func (r *ResourceCollector) GetLatest() (framework.ResourceUsagePerContainer, er
     return kubeletstatsv1alpha1, nil
 }
 
-type resourceUsageByCPU []*framework.ContainerResourceUsage
+type resourceUsageByCPU []*e2ekubelet.ContainerResourceUsage
 
 func (r resourceUsageByCPU) Len() int      { return len(r) }
 func (r resourceUsageByCPU) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
@@ -218,7 +219,7 @@ func (r *ResourceCollector) GetBasicCPUStats(containerName string) map[float64]f
     result := make(map[float64]float64, len(percentiles))
 
     // We must make a copy of array, otherwise the timeseries order is changed.
-    usages := make([]*framework.ContainerResourceUsage, 0)
+    usages := make([]*e2ekubelet.ContainerResourceUsage, 0)
     usages = append(usages, r.buffers[containerName]...)
 
     sort.Sort(resourceUsageByCPU(usages))
@@ -234,7 +235,7 @@ func (r *ResourceCollector) GetBasicCPUStats(containerName string) map[float64]f
     return result
 }
 
-func formatResourceUsageStats(containerStats framework.ResourceUsagePerContainer) string {
+func formatResourceUsageStats(containerStats e2ekubelet.ResourceUsagePerContainer) string {
     // Example output:
     //
     // Resource usage:
@@ -252,7 +253,7 @@ func formatResourceUsageStats(containerStats framework.ResourceUsagePerContainer
     return fmt.Sprintf("Resource usage:\n%s", buf.String())
 }
 
-func formatCPUSummary(summary framework.ContainersCPUSummary) string {
+func formatCPUSummary(summary e2ekubelet.ContainersCPUSummary) string {
     // Example output for a node (the percentiles may differ):
     // CPU usage of containers:
     // container 5th% 50th% 90th% 95th%
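The resourceUsageByCPU type above is a plain sort.Interface over usage samples; GetBasicCPUStats sorts a copy of the buffer and reads percentiles off the sorted slice. A self-contained sketch of that computation, with ContainerResourceUsage trimmed to the one field the sort needs; the percentile index arithmetic is one plausible implementation, not copied from this patch:

    package main

    import (
        "fmt"
        "sort"
    )

    // ContainerResourceUsage is a trimmed stand-in for the e2ekubelet type.
    type ContainerResourceUsage struct {
        Name            string
        CPUUsageInCores float64
    }

    // resourceUsageByCPU orders samples by CPU usage, as in the diff.
    type resourceUsageByCPU []*ContainerResourceUsage

    func (r resourceUsageByCPU) Len() int      { return len(r) }
    func (r resourceUsageByCPU) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
    func (r resourceUsageByCPU) Less(i, j int) bool {
        return r[i].CPUUsageInCores < r[j].CPUUsageInCores
    }

    // basicCPUStats returns the requested percentiles of CPU usage. It sorts
    // a copy of the buffer so the original time series keeps its order, the
    // same reason the diff's GetBasicCPUStats copies before sorting.
    func basicCPUStats(buffer []*ContainerResourceUsage, percentiles []float64) map[float64]float64 {
        usages := make([]*ContainerResourceUsage, 0, len(buffer))
        usages = append(usages, buffer...)
        sort.Sort(resourceUsageByCPU(usages))

        result := make(map[float64]float64, len(percentiles))
        for _, q := range percentiles {
            index := int(float64(len(usages))*q) - 1
            if index < 0 {
                index = 0
            }
            result[q] = usages[index].CPUUsageInCores
        }
        return result
    }

    func main() {
        buf := []*ContainerResourceUsage{
            {"kubelet", 0.42}, {"kubelet", 0.18},
            {"kubelet", 0.27}, {"kubelet", 0.35},
        }
        fmt.Println(basicCPUStats(buf, []float64{0.50, 0.95}))
    }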
@@ -26,7 +26,9 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
     imageutils "k8s.io/kubernetes/test/utils/image"
 
     . "github.com/onsi/ginkgo"
@@ -67,13 +69,13 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
     rTests := []resourceTest{
         {
            podsNr: 10,
-            cpuLimits: framework.ContainersCPUSummary{
+            cpuLimits: e2ekubelet.ContainersCPUSummary{
                 kubeletstatsv1alpha1.SystemContainerKubelet: {0.50: 0.30, 0.95: 0.35},
                 kubeletstatsv1alpha1.SystemContainerRuntime: {0.50: 0.30, 0.95: 0.40},
             },
-            memLimits: framework.ResourceUsagePerContainer{
-                kubeletstatsv1alpha1.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 200 * 1024 * 1024},
-                kubeletstatsv1alpha1.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 400 * 1024 * 1024},
+            memLimits: e2ekubelet.ResourceUsagePerContainer{
+                kubeletstatsv1alpha1.SystemContainerKubelet: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 200 * 1024 * 1024},
+                kubeletstatsv1alpha1.SystemContainerRuntime: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 400 * 1024 * 1024},
             },
         },
     }
@@ -125,8 +127,8 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
 
 type resourceTest struct {
     podsNr    int
-    cpuLimits framework.ContainersCPUSummary
-    memLimits framework.ResourceUsagePerContainer
+    cpuLimits e2ekubelet.ContainersCPUSummary
+    memLimits e2ekubelet.ResourceUsagePerContainer
 }
 
 func (rt *resourceTest) getTestName() string {
@@ -183,8 +185,8 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
 }
 
 // logAndVerifyResource prints the resource usage as perf data and verifies whether resource usage satisfies the limit.
-func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimits framework.ContainersCPUSummary,
-    memLimits framework.ResourceUsagePerContainer, testInfo map[string]string, isVerify bool) {
+func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimits e2ekubelet.ContainersCPUSummary,
+    memLimits e2ekubelet.ResourceUsagePerContainer, testInfo map[string]string, isVerify bool) {
     nodeName := framework.TestContext.NodeName
 
     // Obtain memory PerfData
@@ -192,19 +194,19 @@ func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimi
     framework.ExpectNoError(err)
     e2elog.Logf("%s", formatResourceUsageStats(usagePerContainer))
 
-    usagePerNode := make(framework.ResourceUsagePerNode)
+    usagePerNode := make(e2ekubelet.ResourceUsagePerNode)
     usagePerNode[nodeName] = usagePerContainer
 
     // Obtain CPU PerfData
     cpuSummary := rc.GetCPUSummary()
     e2elog.Logf("%s", formatCPUSummary(cpuSummary))
 
-    cpuSummaryPerNode := make(framework.NodesCPUSummary)
+    cpuSummaryPerNode := make(e2ekubelet.NodesCPUSummary)
     cpuSummaryPerNode[nodeName] = cpuSummary
 
     // Print resource usage
-    logPerfData(framework.ResourceUsageToPerfDataWithLabels(usagePerNode, testInfo), "memory")
-    logPerfData(framework.CPUUsageToPerfDataWithLabels(cpuSummaryPerNode, testInfo), "cpu")
+    logPerfData(e2eperf.ResourceUsageToPerfDataWithLabels(usagePerNode, testInfo), "memory")
+    logPerfData(e2eperf.CPUUsageToPerfDataWithLabels(cpuSummaryPerNode, testInfo), "cpu")
 
     // Verify resource usage
     if isVerify {
@@ -213,7 +215,7 @@ func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimi
     }
 }
 
-func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) {
+func verifyMemoryLimits(c clientset.Interface, expected e2ekubelet.ResourceUsagePerContainer, actual e2ekubelet.ResourceUsagePerNode) {
     if expected == nil {
         return
     }
@@ -249,7 +251,7 @@
     }
 }
 
-func verifyCPULimits(expected framework.ContainersCPUSummary, actual framework.NodesCPUSummary) {
+func verifyCPULimits(expected e2ekubelet.ContainersCPUSummary, actual e2ekubelet.NodesCPUSummary) {
     if expected == nil {
         return
     }