	Move rest of performance data gathered by tests to Summaries
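
Taken together, the hunks below move all of the performance data the e2e tests gather (resource usage, log sizes, component metrics, API responsiveness, scheduling latency, pod startup latency) into a single Framework.TestSummaries slice of TestDataSummary values, and replace the ad-hoc printing code in Framework.AfterEach with a reusable PrintSummaries helper. To make the gathered data usable as summaries, HighLatencyRequests, VerifySchedulerLatency, and related helpers now return pointers to the collected structs, and the default value of the --output-print-type flag changes from "hr" to "json".
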
@@ -21,7 +21,6 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path"
 	"reflect"
@@ -91,6 +90,10 @@ type Framework struct {
 
 	// configuration for framework's client
 	Options FrameworkOptions
+
+	// Place where various additional data is stored during test run to be printed to ReportDir,
+	// or stdout if ReportDir is not set once test ends.
+	TestSummaries []TestDataSummary
 }
 
 type TestDataSummary interface {
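
The TestDataSummary interface itself appears above only as a context line. Judging from the methods this commit adds to PodStartupLatency, SchedulingLatency, and APIResponsiveness further down, it can be sketched roughly as follows; the exact declaration in the framework package may differ:

	type TestDataSummary interface {
		SummaryKind() string
		PrintHumanReadable() string
		PrintJSON() string
	}
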
@@ -307,19 +310,18 @@ func (f *Framework) AfterEach() {
 		LogContainersInPodsWithLabels(f.ClientSet, metav1.NamespaceSystem, ImagePullerLabels, "image-puller", logFunc)
 	}
 
-	summaries := make([]TestDataSummary, 0)
 	if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
 		By("Collecting resource usage data")
 		summary, resourceViolationError := f.gatherer.stopAndSummarize([]int{90, 99, 100}, f.AddonResourceConstraints)
 		defer ExpectNoError(resourceViolationError)
-		summaries = append(summaries, summary)
+		f.TestSummaries = append(f.TestSummaries, summary)
 	}
 
 	if TestContext.GatherLogsSizes {
 		By("Gathering log sizes data")
 		close(f.logsSizeCloseChannel)
 		f.logsSizeWaitGroup.Wait()
-		summaries = append(summaries, f.logsSizeVerifier.GetSummary())
+		f.TestSummaries = append(f.TestSummaries, f.logsSizeVerifier.GetSummary())
 	}
 
 	if TestContext.GatherMetricsAfterTest {
@@ -334,44 +336,12 @@ func (f *Framework) AfterEach() {
 			if err != nil {
 				Logf("MetricsGrabber failed to grab metrics (skipping metrics gathering): %v", err)
 			} else {
-				summaries = append(summaries, (*MetricsForE2E)(&received))
+				f.TestSummaries = append(f.TestSummaries, (*MetricsForE2E)(&received))
 			}
 		}
 	}
 
-	outputTypes := strings.Split(TestContext.OutputPrintType, ",")
-	now := time.Now()
-	for _, printType := range outputTypes {
-		switch printType {
-		case "hr":
-			for i := range summaries {
-				if TestContext.ReportDir == "" {
-					Logf(summaries[i].PrintHumanReadable())
-				} else {
-					// TODO: learn to extract test name and append it to the kind instead of timestamp.
-					filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+f.BaseName+"_"+now.Format(time.RFC3339)+".txt")
-					if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintHumanReadable()), 0644); err != nil {
-						Logf("Failed to write file %v with test performance data: %v", filePath, err)
-					}
-				}
-			}
-		case "json":
-			for i := range summaries {
-				if TestContext.ReportDir == "" {
-					Logf("%v JSON\n%v", summaries[i].SummaryKind(), summaries[i].PrintJSON())
-					Logf("Finished")
-				} else {
-					// TODO: learn to extract test name and append it to the kind instead of timestamp.
-					filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+f.BaseName+"_"+now.Format(time.RFC3339)+".json")
-					if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintJSON()), 0644); err != nil {
-						Logf("Failed to write file %v with test performance data: %v", filePath, err)
-					}
-				}
-			}
-		default:
-			Logf("Unknown output type: %v. Skipping.", printType)
-		}
-	}
+	PrintSummaries(f.TestSummaries, f.BaseName)
 
 	// Check whether all nodes are ready after the test.
 	// This is explicitly done at the very end of the test, to avoid
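
With the two AfterEach hunks above, a test no longer builds its own local summaries slice: anything it wants reported is appended to f.TestSummaries, and the framework prints everything in one place at teardown. A minimal sketch of the resulting test-side pattern, where mySummary is a hypothetical type implementing TestDataSummary:

	// Inside a test body:
	f.TestSummaries = append(f.TestSummaries, &mySummary{})
	// Later, the framework's AfterEach effectively runs:
	// PrintSummaries(f.TestSummaries, f.BaseName)
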
@@ -150,12 +150,36 @@ type PodStartupLatency struct {
 	Latency LatencyMetric `json:"latency"`
 }
 
+func (l *PodStartupLatency) SummaryKind() string {
+	return "PodStartupLatency"
+}
+
+func (l *PodStartupLatency) PrintHumanReadable() string {
+	return PrettyPrintJSON(*l)
+}
+
+func (l *PodStartupLatency) PrintJSON() string {
+	return PrettyPrintJSON(*l)
+}
+
 type SchedulingLatency struct {
 	Scheduling LatencyMetric `json:"scheduling"`
 	Binding    LatencyMetric `json:"binding"`
 	Total      LatencyMetric `json:"total"`
 }
 
+func (l *SchedulingLatency) SummaryKind() string {
+	return "SchedulingLatency"
+}
+
+func (l *SchedulingLatency) PrintHumanReadable() string {
+	return PrettyPrintJSON(*l)
+}
+
+func (l *SchedulingLatency) PrintJSON() string {
+	return PrettyPrintJSON(*l)
+}
+
 type SaturationTime struct {
 	TimeToSaturate time.Duration `json:"timeToStaturate"`
 	NumberOfNodes  int           `json:"numberOfNodes"`
@@ -174,9 +198,23 @@ type APIResponsiveness struct {
 	APICalls []APICall `json:"apicalls"`
 }
 
-func (a APIResponsiveness) Len() int      { return len(a.APICalls) }
-func (a APIResponsiveness) Swap(i, j int) { a.APICalls[i], a.APICalls[j] = a.APICalls[j], a.APICalls[i] }
-func (a APIResponsiveness) Less(i, j int) bool {
+func (a *APIResponsiveness) SummaryKind() string {
+	return "APIResponsiveness"
+}
+
+func (a *APIResponsiveness) PrintHumanReadable() string {
+	return PrettyPrintJSON(*a)
+}
+
+func (a *APIResponsiveness) PrintJSON() string {
+	return PrettyPrintJSON(*a)
+}
+
+func (a *APIResponsiveness) Len() int { return len(a.APICalls) }
+func (a *APIResponsiveness) Swap(i, j int) {
+	a.APICalls[i], a.APICalls[j] = a.APICalls[j], a.APICalls[i]
+}
+func (a *APIResponsiveness) Less(i, j int) bool {
 	return a.APICalls[i].Latency.Perc99 < a.APICalls[j].Latency.Perc99
 }
 
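
After this hunk, *APIResponsiveness keeps satisfying sort.Interface (the receivers simply switch to pointers, matching the pointer that readLatencyMetrics now returns) and, presumably, also satisfies the new TestDataSummary interface via SummaryKind, PrintHumanReadable, and PrintJSON. A compile-time check of that assumption would look like:

	var _ sort.Interface = &APIResponsiveness{}
	var _ TestDataSummary = &APIResponsiveness{}
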
@@ -225,17 +263,17 @@ func (a *APIResponsiveness) addMetricRequestCount(resource, verb string, count i
 	a.APICalls = append(a.APICalls, apicall)
 }
 
-func readLatencyMetrics(c clientset.Interface) (APIResponsiveness, error) {
+func readLatencyMetrics(c clientset.Interface) (*APIResponsiveness, error) {
 	var a APIResponsiveness
 
 	body, err := getMetrics(c)
 	if err != nil {
-		return a, err
+		return nil, err
 	}
 
 	samples, err := extractMetricSamples(body)
 	if err != nil {
-		return a, err
+		return nil, err
 	}
 
 	ignoredResources := sets.NewString("events")
@@ -262,7 +300,7 @@ func readLatencyMetrics(c clientset.Interface) (APIResponsiveness, error) {
 			latency := sample.Value
 			quantile, err := strconv.ParseFloat(string(sample.Metric[model.QuantileLabel]), 64)
 			if err != nil {
-				return a, err
+				return nil, err
 			}
 			a.addMetricRequestLatency(resource, verb, quantile, time.Duration(int64(latency))*time.Microsecond)
 		case "apiserver_request_count":
@@ -272,22 +310,22 @@ func readLatencyMetrics(c clientset.Interface) (APIResponsiveness, error) {
 		}
 	}
 
-	return a, err
+	return &a, err
 }
 
 // Prints top five summary metrics for request types with latency and returns
 // number of such request types above threshold.
-func HighLatencyRequests(c clientset.Interface) (int, error) {
+func HighLatencyRequests(c clientset.Interface) (int, *APIResponsiveness, error) {
 	metrics, err := readLatencyMetrics(c)
 	if err != nil {
-		return 0, err
+		return 0, metrics, err
 	}
 	sort.Sort(sort.Reverse(metrics))
 	badMetrics := 0
 	top := 5
-	for _, metric := range metrics.APICalls {
+	for i := range metrics.APICalls {
 		isBad := false
-		if metric.Latency.Perc99 > apiCallLatencyThreshold {
+		if metrics.APICalls[i].Latency.Perc99 > apiCallLatencyThreshold {
 			badMetrics++
 			isBad = true
 		}
@@ -297,23 +335,15 @@ func HighLatencyRequests(c clientset.Interface) (int, error) {
 			if isBad {
 				prefix = "WARNING "
 			}
-			Logf("%vTop latency metric: %+v", prefix, metric)
+			Logf("%vTop latency metric: %+v", prefix, metrics.APICalls[i])
 		}
 	}
-
-	// TODO(random-liu): Remove the log when we migrate to new perfdash
-	Logf("API calls latencies: %s", PrettyPrintJSON(metrics))
-	// Log perf data
-	PrintPerfData(ApiCallToPerfData(metrics))
-
-	return badMetrics, nil
+	return badMetrics, metrics, nil
 }
 
 // Verifies whether 50, 90 and 99th percentiles of PodStartupLatency are
 // within the threshold.
-func VerifyPodStartupLatency(latency PodStartupLatency) error {
-	Logf("Pod startup latency: %s", PrettyPrintJSON(latency))
-
+func VerifyPodStartupLatency(latency *PodStartupLatency) error {
 	if latency.Latency.Perc50 > podStartupThreshold {
 		return fmt.Errorf("too high pod startup latency 50th percentile: %v", latency.Latency.Perc50)
 	}
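
The extra return value is what lets callers keep the gathered metrics around as a summary while still asserting on the count, and VerifyPodStartupLatency now takes a *PodStartupLatency for the same reason. The density and load tests later in this commit call the new signature roughly like this:

	highLatencyRequests, metrics, err := framework.HighLatencyRequests(c)
	framework.ExpectNoError(err)
	if err == nil {
		// *APIResponsiveness doubles as a TestDataSummary
		summaries = append(summaries, metrics)
		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
	}
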
@@ -349,7 +379,7 @@ func getMetrics(c clientset.Interface) (string, error) {
 }
 
 // Retrieves scheduler metrics information.
-func getSchedulingLatency(c clientset.Interface) (SchedulingLatency, error) {
+func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) {
 	result := SchedulingLatency{}
 
 	// Check if master Node is registered
@@ -358,7 +388,7 @@ func getSchedulingLatency(c clientset.Interface) (SchedulingLatency, error) {
 
 	subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, c.Discovery())
 	if err != nil {
-		return result, err
+		return nil, err
 	}
 
 	var data string
@@ -400,13 +430,13 @@ func getSchedulingLatency(c clientset.Interface) (SchedulingLatency, error) {
 		cmd := "curl http://localhost:10251/metrics"
 		sshResult, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
 		if err != nil || sshResult.Code != 0 {
-			return result, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
+			return &result, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
 		}
 		data = sshResult.Stdout
 	}
 	samples, err := extractMetricSamples(data)
 	if err != nil {
-		return result, err
+		return nil, err
 	}
 
 	for _, sample := range samples {
@@ -426,23 +456,20 @@ func getSchedulingLatency(c clientset.Interface) (SchedulingLatency, error) {
 		latency := sample.Value
 		quantile, err := strconv.ParseFloat(string(sample.Metric[model.QuantileLabel]), 64)
 		if err != nil {
-			return result, err
+			return nil, err
 		}
 		setQuantile(metric, quantile, time.Duration(int64(latency))*time.Microsecond)
 	}
-	return result, nil
+	return &result, nil
 }
 
 // Verifies (currently just by logging them) the scheduling latencies.
-func VerifySchedulerLatency(c clientset.Interface) error {
+func VerifySchedulerLatency(c clientset.Interface) (*SchedulingLatency, error) {
 	latency, err := getSchedulingLatency(c)
 	if err != nil {
-		return err
+		return nil, err
 	}
-	Logf("Scheduling latency: %s", PrettyPrintJSON(latency))
-
-	// TODO: Add some reasonable checks once we know more about the values.
-	return nil
+	return latency, nil
 }
 
 func PrettyPrintJSON(metrics interface{}) string {
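
VerifySchedulerLatency now returns the gathered *SchedulingLatency instead of only logging it; the old TODO about adding real checks disappears together with the log line, leaving any assertions to the caller. The density test below consumes it like this:

	latency, err := framework.VerifySchedulerLatency(c)
	framework.ExpectNoError(err)
	if err == nil {
		summaries = append(summaries, latency)
	}
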
@@ -162,7 +162,7 @@ func RegisterCommonFlags() {
 	flag.StringVar(&TestContext.GatherKubeSystemResourceUsageData, "gather-resource-usage", "false", "If set to 'true' or 'all' framework will be monitoring resource usage of system all add-ons in (some) e2e tests, if set to 'master' framework will be monitoring master node only, if set to 'none' of 'false' monitoring will be turned off.")
 	flag.BoolVar(&TestContext.GatherLogsSizes, "gather-logs-sizes", false, "If set to true framework will be monitoring logs sizes on all machines running e2e tests.")
 	flag.BoolVar(&TestContext.GatherMetricsAfterTest, "gather-metrics-at-teardown", false, "If set to true framwork will gather metrics from all components after each test.")
-	flag.StringVar(&TestContext.OutputPrintType, "output-print-type", "hr", "Comma separated list: 'hr' for human readable summaries 'json' for JSON ones.")
+	flag.StringVar(&TestContext.OutputPrintType, "output-print-type", "json", "Format in which summaries should be printed: 'hr' for human readable, 'json' for JSON ones.")
 	flag.BoolVar(&TestContext.DumpLogsOnFailure, "dump-logs-on-failure", true, "If set to true test will dump data about the namespace in which test was running.")
 	flag.BoolVar(&TestContext.DisableLogDump, "disable-log-dump", false, "If set to true, logs from master and nodes won't be gathered after test run.")
 	flag.BoolVar(&TestContext.DeleteNamespace, "delete-namespace", true, "If true tests will delete namespace after completion. It is only designed to make debugging easier, DO NOT turn it off by default.")
@@ -5526,3 +5526,38 @@ func GetAzureCloud() (*azure.Cloud, error) {
 	}
 	return cloud, nil
 }
+
+func PrintSummaries(summaries []TestDataSummary, testBaseName string) {
+	now := time.Now()
+	for i := range summaries {
+		Logf("Printing summary: %v", summaries[i].SummaryKind())
+		switch TestContext.OutputPrintType {
+		case "hr":
+			if TestContext.ReportDir == "" {
+				Logf(summaries[i].PrintHumanReadable())
+			} else {
+				// TODO: learn to extract test name and append it to the kind instead of timestamp.
+				filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".txt")
+				if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintHumanReadable()), 0644); err != nil {
+					Logf("Failed to write file %v with test performance data: %v", filePath, err)
+				}
+			}
+		case "json":
+			fallthrough
+		default:
+			if TestContext.OutputPrintType != "json" {
+				Logf("Unknown output type: %v. Printing JSON", TestContext.OutputPrintType)
+			}
+			if TestContext.ReportDir == "" {
+				Logf("%v JSON\n%v", summaries[i].SummaryKind(), summaries[i].PrintJSON())
+				Logf("Finished")
+			} else {
+				// TODO: learn to extract test name and append it to the kind instead of timestamp.
+				filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".json")
+				if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintJSON()), 0644); err != nil {
+					Logf("Failed to write file %v with test performance data: %v", filePath, err)
+				}
+			}
+		}
+	}
+}
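
PrintSummaries centralizes the printing logic removed from Framework.AfterEach. One behavioral difference: the old code split --output-print-type on commas and could emit both formats, while the new helper treats it as a single value, defaults to "json" (see the flag change above), and falls back to JSON with a warning for unrecognized values. A minimal sketch of calling it directly from a test, assuming a summary value is already in hand:

	framework.PrintSummaries([]framework.TestDataSummary{podStartupLatency}, "density")
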
@@ -304,6 +304,8 @@ var _ = framework.KubeDescribe("Density", func() {
 	var nodes *v1.NodeList
 	var masters sets.String
 
+	testCaseBaseName := "density"
+
 	// Gathers data prior to framework namespace teardown
 	AfterEach(func() {
 		saturationThreshold := time.Duration((totalPods / MinPodsPerSecondThroughput)) * time.Second
@@ -319,17 +321,25 @@ var _ = framework.KubeDescribe("Density", func() {
 		}
 		framework.Logf("Cluster saturation time: %s", framework.PrettyPrintJSON(saturationData))
 
+		summaries := make([]framework.TestDataSummary, 0, 2)
 		// Verify latency metrics.
-		highLatencyRequests, err := framework.HighLatencyRequests(c)
+		highLatencyRequests, metrics, err := framework.HighLatencyRequests(c)
 		framework.ExpectNoError(err)
-		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
+		if err == nil {
+			summaries = append(summaries, metrics)
+			Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
+		}
+
 		// Verify scheduler metrics.
 		// TODO: Reset metrics at the beginning of the test.
 		// We should do something similar to how we do it for APIserver.
-		if err = framework.VerifySchedulerLatency(c); err != nil {
-			framework.Logf("Warning: Scheduler latency not calculated, %v", err)
+		latency, err := framework.VerifySchedulerLatency(c)
+		framework.ExpectNoError(err)
+		if err == nil {
+			summaries = append(summaries, latency)
 		}
 
+		framework.PrintSummaries(summaries, testCaseBaseName)
 	})
 
 	options := framework.FrameworkOptions{
@@ -338,7 +348,7 @@ var _ = framework.KubeDescribe("Density", func() {
 	}
 	// Explicitly put here, to delete namespace at the end of the test
 	// (after measuring latency metrics, etc.).
-	f := framework.NewFramework("density", options, nil)
+	f := framework.NewFramework(testCaseBaseName, options, nil)
 	f.NamespaceDeletionTimeout = time.Hour
 
 	BeforeEach(func() {
@@ -718,7 +728,8 @@ var _ = framework.KubeDescribe("Density", func() {
 				framework.PrintLatencies(e2eLag, "worst e2e total latencies")
 
 				// Test whether e2e pod startup time is acceptable.
-				podStartupLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLag)}
+				podStartupLatency := &framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLag)}
+				f.TestSummaries = append(f.TestSummaries, podStartupLatency)
 				framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))
 
 				framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
@@ -81,13 +81,20 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 	var configs []testutils.RunObjectConfig
 	var secretConfigs []*testutils.SecretConfig
 
+	testCaseBaseName := "load"
+
 	// Gathers metrics before teardown
 	// TODO add flag that allows to skip cleanup on failure
 	AfterEach(func() {
 		// Verify latency metrics
-		highLatencyRequests, err := framework.HighLatencyRequests(clientset)
-		framework.ExpectNoError(err, "Too many instances metrics above the threshold")
-		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0))
+		highLatencyRequests, metrics, err := framework.HighLatencyRequests(clientset)
+		framework.ExpectNoError(err)
+		if err == nil {
+			summaries := make([]framework.TestDataSummary, 0, 1)
+			summaries = append(summaries, metrics)
+			framework.PrintSummaries(summaries, testCaseBaseName)
+			Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
+		}
 	})
 
 	// We assume a default throughput of 10 pods/second throughput.
@@ -106,7 +113,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 		ClientQPS:   float32(math.Max(50.0, float64(2*throughput))),
 		ClientBurst: int(math.Max(100.0, float64(4*throughput))),
 	}
-	f := framework.NewFramework("load", options, nil)
+	f := framework.NewFramework(testCaseBaseName, options, nil)
 	f.NamespaceDeletionTimeout = time.Hour
 
 	BeforeEach(func() {