From 8b17db7e0c4431cd5fd9a5d9a3ab11b04e2f0a7e Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Wed, 1 Aug 2018 12:46:59 +0200
Subject: [PATCH 01/19] e2e: modular framework

Not all users of the E2E framework want to run cloud-provider-specific
tests. By splitting out the code, it becomes possible to decide in an
E2E test suite which providers are supported.

This is achieved in two ways:
- the framework calls certain functions through a provider interface
  instead of calling specific cloud provider functions directly
- tests that are cloud-provider-specific directly import the new
  provider packages

The ingress test utilities are only needed by a few tests. Splitting
them out into a separate package makes the framework simpler for test
suites not using those tests.

Fixes: #66649
---
 hack/.golint_failures                         |   5 +
 test/e2e/e2e.go                               |  92 +-
 test/e2e/framework/framework.go               |  34 +-
 .../framework/{ => ingress}/ingress_utils.go  | 893 ++----------------
 test/e2e/framework/provider.go                | 139 +++
 test/e2e/framework/providers/aws/aws.go       | 129 +++
 test/e2e/framework/providers/azure/azure.go   |  80 ++
 .../gce/firewall.go}                          |  51 +-
 .../gce/firewall_test.go}                     |   2 +-
 test/e2e/framework/providers/gce/gce.go       | 376 ++++++++
 test/e2e/framework/providers/gce/ingress.go   | 817 ++++++++++++++++
 .../framework/providers/kubemark/kubemark.go  |  91 ++
 test/e2e/framework/pv_util.go                 | 170 +---
 test/e2e/framework/service_util.go            |  44 +-
 test/e2e/framework/size.go                    |  99 +-
 test/e2e/framework/test_context.go            |  26 +-
 test/e2e/framework/util.go                    | 124 +--
 test/e2e/network/firewall.go                  |  56 +-
 test/e2e/network/ingress.go                   | 167 ++--
 test/e2e/network/network_tiers.go             |   7 +-
 test/e2e/network/scale/ingress.go             |  14 +-
 .../network/scale/localrun/ingress_scale.go   |   6 +-
 test/e2e/network/service.go                   |   5 +-
 test/e2e/scheduling/ubernetes_lite_volumes.go |   3 +-
 test/e2e/storage/pd.go                        |   7 +-
 test/e2e/storage/persistent_volumes-gce.go    |   3 +-
 test/e2e/storage/regional_pd.go               |   3 +-
 test/e2e/storage/volume_provisioning.go       |   5 +-
 test/e2e/upgrades/ingress.go                  |  18 +-
 test/e2e_node/e2e_node_suite_test.go          |   1 +
 30 files changed, 1898 insertions(+), 1569 deletions(-)
 rename test/e2e/framework/{ => ingress}/ingress_utils.go (51%)
 create mode 100644 test/e2e/framework/provider.go
 create mode 100644 test/e2e/framework/providers/aws/aws.go
 create mode 100644 test/e2e/framework/providers/azure/azure.go
 rename test/e2e/framework/{firewall_util.go => providers/gce/firewall.go} (87%)
 rename test/e2e/framework/{firewall_util_test.go => providers/gce/firewall_test.go} (98%)
 create mode 100644 test/e2e/framework/providers/gce/gce.go
 create mode 100644 test/e2e/framework/providers/gce/ingress.go
 create mode 100644 test/e2e/framework/providers/kubemark/kubemark.go

diff --git a/hack/.golint_failures b/hack/.golint_failures
index c112d374c34..b19e164a030 100644
--- a/hack/.golint_failures
+++ b/hack/.golint_failures
@@ -737,7 +737,12 @@ test/e2e/autoscaling
 test/e2e/chaosmonkey
 test/e2e/common
 test/e2e/framework
+test/e2e/framework/ingress
 test/e2e/framework/metrics
+test/e2e/framework/providers/aws
+test/e2e/framework/providers/azure
+test/e2e/framework/providers/gce
+test/e2e/framework/providers/kubemark
 test/e2e/framework/timer
 test/e2e/instrumentation
 test/e2e/instrumentation/logging
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
index 8b20070c534..593f8d2f4e3 100644
--- a/test/e2e/e2e.go
+++ b/test/e2e/e2e.go
@@ -34,8 +34,6 @@ import (
 	runtimeutils "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apiserver/pkg/util/logs"
 	clientset "k8s.io/client-go/kubernetes"
-
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure" - gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/version" commontest "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" @@ -46,86 +44,18 @@ import ( // ensure auth plugins are loaded _ "k8s.io/client-go/plugin/pkg/client/auth" + + // ensure that cloud providers are loaded + _ "k8s.io/kubernetes/test/e2e/framework/providers/aws" + _ "k8s.io/kubernetes/test/e2e/framework/providers/azure" + _ "k8s.io/kubernetes/test/e2e/framework/providers/gce" + _ "k8s.io/kubernetes/test/e2e/framework/providers/kubemark" ) var ( cloudConfig = &framework.TestContext.CloudConfig ) -// setupProviderConfig validates and sets up cloudConfig based on framework.TestContext.Provider. -func setupProviderConfig() error { - switch framework.TestContext.Provider { - case "": - glog.Info("The --provider flag is not set. Treating as a conformance test. Some tests may not be run.") - - case "gce", "gke": - framework.Logf("Fetching cloud provider for %q\r", framework.TestContext.Provider) - zone := framework.TestContext.CloudConfig.Zone - region := framework.TestContext.CloudConfig.Region - - var err error - if region == "" { - region, err = gcecloud.GetGCERegion(zone) - if err != nil { - return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err) - } - } - managedZones := []string{} // Manage all zones in the region - if !framework.TestContext.CloudConfig.MultiZone { - managedZones = []string{zone} - } - - gceCloud, err := gcecloud.CreateGCECloud(&gcecloud.CloudConfig{ - ApiEndpoint: framework.TestContext.CloudConfig.ApiEndpoint, - ProjectID: framework.TestContext.CloudConfig.ProjectID, - Region: region, - Zone: zone, - ManagedZones: managedZones, - NetworkName: "", // TODO: Change this to use framework.TestContext.CloudConfig.Network? - SubnetworkName: "", - NodeTags: nil, - NodeInstancePrefix: "", - TokenSource: nil, - UseMetadataServer: false, - AlphaFeatureGate: gcecloud.NewAlphaFeatureGate([]string{}), - }) - - if err != nil { - return fmt.Errorf("Error building GCE/GKE provider: %v", err) - } - - cloudConfig.Provider = gceCloud - - // Arbitrarily pick one of the zones we have nodes in - if cloudConfig.Zone == "" && framework.TestContext.CloudConfig.MultiZone { - zones, err := gceCloud.GetAllZonesFromCloudProvider() - if err != nil { - return err - } - - cloudConfig.Zone, _ = zones.PopAny() - } - - case "aws": - if cloudConfig.Zone == "" { - return fmt.Errorf("gce-zone must be specified for AWS") - } - case "azure": - if cloudConfig.ConfigFile == "" { - return fmt.Errorf("config-file must be specified for Azure") - } - config, err := os.Open(cloudConfig.ConfigFile) - if err != nil { - framework.Logf("Couldn't open cloud provider configuration %s: %#v", - cloudConfig.ConfigFile, err) - } - defer config.Close() - cloudConfig.Provider, err = azure.NewCloud(config) - } - - return nil -} - // There are certain operations we only want to run once per overall test invocation // (such as deleting old namespaces, or verifying that all system pods are running. 
// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
@@ -137,10 +67,6 @@ func setupProviderConfig() error {
 var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
 	// Run only on Ginkgo node 1
 
-	if err := setupProviderConfig(); err != nil {
-		framework.Failf("Failed to setup provider config: %v", err)
-	}
-
 	switch framework.TestContext.Provider {
 	case "gce", "gke":
 		framework.LogClusterImageSources()
@@ -214,12 +140,6 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
 
 }, func(data []byte) {
 	// Run on all Ginkgo nodes
-
-	if cloudConfig.Provider == nil {
-		if err := setupProviderConfig(); err != nil {
-			framework.Failf("Failed to setup provider config: %v", err)
-		}
-	}
 })
 
 // Similar to SynchornizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs).
diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go
index 495dab8592e..225c5a2e5c9 100644
--- a/test/e2e/framework/framework.go
+++ b/test/e2e/framework/framework.go
@@ -14,6 +14,11 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
+// Package framework contains provider-independent helper code for
+// building and running E2E tests with Ginkgo. The actual Ginkgo test
+// suites get assembled by combining this framework, the optional
+// provider support code, and specific tests via a separate .go file
+// like Kubernetes' test/e2e.go.
 package framework
 
 import (
@@ -36,17 +41,14 @@ import (
 	"k8s.io/client-go/discovery"
 	cacheddiscovery "k8s.io/client-go/discovery/cached"
 	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/restmapper"
 	scaleclient "k8s.io/client-go/scale"
-	"k8s.io/client-go/tools/clientcmd"
 	csi "k8s.io/csi-api/pkg/client/clientset/versioned"
 	aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	"k8s.io/kubernetes/pkg/kubemark"
 	"k8s.io/kubernetes/test/e2e/framework/metrics"
 	testutils "k8s.io/kubernetes/test/utils"
 
@@ -108,8 +110,6 @@ type Framework struct {
 	// or stdout if ReportDir is not set once test ends.
 	TestSummaries []TestDataSummary
 
-	kubemarkControllerCloseChannel chan struct{}
-
 	// Place to keep ClusterAutoscaler metrics from before test in order to compute delta.
clusterAutoscalerMetricsBeforeTest metrics.MetricsCollection } @@ -210,25 +210,7 @@ func (f *Framework) BeforeEach() { resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient) f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver) - if ProviderIs("kubemark") && TestContext.KubemarkExternalKubeConfig != "" && TestContext.CloudConfig.KubemarkController == nil { - externalConfig, err := clientcmd.BuildConfigFromFlags("", TestContext.KubemarkExternalKubeConfig) - externalConfig.QPS = f.Options.ClientQPS - externalConfig.Burst = f.Options.ClientBurst - Expect(err).NotTo(HaveOccurred()) - externalClient, err := clientset.NewForConfig(externalConfig) - Expect(err).NotTo(HaveOccurred()) - f.KubemarkExternalClusterClientSet = externalClient - f.kubemarkControllerCloseChannel = make(chan struct{}) - externalInformerFactory := informers.NewSharedInformerFactory(externalClient, 0) - kubemarkInformerFactory := informers.NewSharedInformerFactory(f.ClientSet, 0) - kubemarkNodeInformer := kubemarkInformerFactory.Core().V1().Nodes() - go kubemarkNodeInformer.Informer().Run(f.kubemarkControllerCloseChannel) - TestContext.CloudConfig.KubemarkController, err = kubemark.NewKubemarkController(f.KubemarkExternalClusterClientSet, externalInformerFactory, f.ClientSet, kubemarkNodeInformer) - Expect(err).NotTo(HaveOccurred()) - externalInformerFactory.Start(f.kubemarkControllerCloseChannel) - Expect(TestContext.CloudConfig.KubemarkController.WaitForCacheSync(f.kubemarkControllerCloseChannel)).To(BeTrue()) - go TestContext.CloudConfig.KubemarkController.Run(f.kubemarkControllerCloseChannel) - } + TestContext.CloudConfig.Provider.FrameworkBeforeEach(f) } if !f.SkipNamespaceCreation { @@ -393,9 +375,7 @@ func (f *Framework) AfterEach() { } } - if TestContext.CloudConfig.KubemarkController != nil { - close(f.kubemarkControllerCloseChannel) - } + TestContext.CloudConfig.Provider.FrameworkAfterEach(f) // Report any flakes that were observed in the e2e test and reset. if f.flakeReport != nil && f.flakeReport.GetFlakeCount() > 0 { diff --git a/test/e2e/framework/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go similarity index 51% rename from test/e2e/framework/ingress_utils.go rename to test/e2e/framework/ingress/ingress_utils.go index 95fc3e07ced..219b718dd23 100644 --- a/test/e2e/framework/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -14,23 +14,20 @@ See the License for the specific language governing permissions and limitations under the License. */ -package framework +package ingress import ( "bytes" "crypto/rand" "crypto/rsa" - "crypto/sha256" "crypto/tls" "crypto/x509" "crypto/x509/pkix" - "encoding/json" "encoding/pem" "fmt" "math/big" "net" "net/http" - "os/exec" "path/filepath" "regexp" "strconv" @@ -40,7 +37,6 @@ import ( "github.com/golang/glog" compute "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" apierrs "k8s.io/apimachinery/pkg/api/errors" @@ -51,11 +47,10 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" + "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/testfiles" "k8s.io/kubernetes/test/e2e/manifest" testutils "k8s.io/kubernetes/test/utils" - utilexec "k8s.io/utils/exec" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -85,22 +80,9 @@ const ( // ServiceApplicationProtocolKey annotation defined in ingress repository. ServiceApplicationProtocolKey = "service.alpha.kubernetes.io/app-protocols" - // all cloud resources created by the ingress controller start with this - // prefix. - k8sPrefix = "k8s-" - - // clusterDelimiter is the delimiter used by the ingress controller - // to split uid from other naming/metadata. - clusterDelimiter = "--" - // Name of the default http backend service defaultBackendName = "default-http-backend" - // Cloud resources created by the ingress controller older than this - // are automatically purged to prevent running out of quota. - // TODO(37335): write soak tests and bump this up to a week. - maxAge = 48 * time.Hour - // IngressManifestPath is the parent path to yaml test manifests. IngressManifestPath = "test/e2e/testing-manifests/ingress" @@ -113,14 +95,6 @@ const ( // General cloud resource poll timeout (eg: create static ip, firewall etc) cloudResourcePollTimeout = 5 * time.Minute - // Name of the config-map and key the ingress controller stores its uid in. - uidConfigMap = "ingress-uid" - uidKey = "uid" - - // GCE only allows names < 64 characters, and the loadbalancer controller inserts - // a single character of padding. - nameLenLimit = 62 - NEGAnnotation = "cloud.google.com/neg" NEGStatusAnnotation = "cloud.google.com/neg-status" NEGUpdateTimeout = 2 * time.Minute @@ -151,11 +125,11 @@ func (l *GLogger) Errorf(format string, args ...interface{}) { type E2ELogger struct{} func (l *E2ELogger) Infof(format string, args ...interface{}) { - Logf(format, args...) + framework.Logf(format, args...) } func (l *E2ELogger) Errorf(format string, args ...interface{}) { - Logf(format, args...) + framework.Logf(format, args...) } // IngressConformanceTests contains a closure with an entry and exit log line. @@ -229,7 +203,7 @@ func CreateIngressComformanceTests(jig *IngressTestJig, ns string, annotations m }) By("Checking that " + pathToFail + " is not exposed by polling for failure") route := fmt.Sprintf("http://%v%v", jig.Address, pathToFail) - ExpectNoError(PollURL(route, updateURLMapHost, LoadBalancerCleanupTimeout, jig.PollInterval, &http.Client{Timeout: IngressReqTimeout}, true)) + framework.ExpectNoError(framework.PollURL(route, updateURLMapHost, framework.LoadBalancerCleanupTimeout, jig.PollInterval, &http.Client{Timeout: IngressReqTimeout}, true)) }, fmt.Sprintf("Waiting for path updates to reflect in L7"), }, @@ -350,7 +324,7 @@ func BuildInsecureClient(timeout time.Duration) *http.Client { // Ingress, it's updated. func createTLSSecret(kubeClient clientset.Interface, namespace, secretName string, hosts ...string) (host string, rootCA, privKey []byte, err error) { host = strings.Join(hosts, ",") - Logf("Generating RSA cert for host %v", host) + framework.Logf("Generating RSA cert for host %v", host) cert, key, err := GenerateRSACerts(host, true) if err != nil { return @@ -367,771 +341,16 @@ func createTLSSecret(kubeClient clientset.Interface, namespace, secretName strin var s *v1.Secret if s, err = kubeClient.CoreV1().Secrets(namespace).Get(secretName, metav1.GetOptions{}); err == nil { // TODO: Retry the update. We don't really expect anything to conflict though. 
- Logf("Updating secret %v in ns %v with hosts %v", secret.Name, namespace, host) + framework.Logf("Updating secret %v in ns %v with hosts %v", secret.Name, namespace, host) s.Data = secret.Data _, err = kubeClient.CoreV1().Secrets(namespace).Update(s) } else { - Logf("Creating secret %v in ns %v with hosts %v", secret.Name, namespace, host) + framework.Logf("Creating secret %v in ns %v with hosts %v", secret.Name, namespace, host) _, err = kubeClient.CoreV1().Secrets(namespace).Create(secret) } return host, cert, key, err } -// GCEIngressController manages implementation details of Ingress on GCE/GKE. -type GCEIngressController struct { - Ns string - rcPath string - UID string - staticIPName string - rc *v1.ReplicationController - svc *v1.Service - Client clientset.Interface - Cloud CloudConfig -} - -func (cont *GCEIngressController) CleanupGCEIngressController() error { - return cont.CleanupGCEIngressControllerWithTimeout(LoadBalancerCleanupTimeout) -} - -// CleanupGCEIngressControllerWithTimeout calls the GCEIngressController.Cleanup(false) -// followed with deleting the static ip, and then a final GCEIngressController.Cleanup(true) -func (cont *GCEIngressController) CleanupGCEIngressControllerWithTimeout(timeout time.Duration) error { - pollErr := wait.Poll(5*time.Second, timeout, func() (bool, error) { - if err := cont.Cleanup(false); err != nil { - Logf("Monitoring glbc's cleanup of gce resources:\n%v", err) - return false, nil - } - return true, nil - }) - - // Always try to cleanup even if pollErr == nil, because the cleanup - // routine also purges old leaked resources based on creation timestamp. - By("Performing final delete of any remaining resources") - if cleanupErr := cont.Cleanup(true); cleanupErr != nil { - By(fmt.Sprintf("WARNING: possibly leaked resources: %v\n", cleanupErr)) - } else { - By("No resources leaked.") - } - - // Static-IP allocated on behalf of the test, never deleted by the - // controller. Delete this IP only after the controller has had a chance - // to cleanup or it might interfere with the controller, causing it to - // throw out confusing events. - if ipErr := wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) { - if err := cont.deleteStaticIPs(); err != nil { - Logf("Failed to delete static-ip: %v\n", err) - return false, nil - } - return true, nil - }); ipErr != nil { - // If this is a persistent error, the suite will fail when we run out - // of quota anyway. - By(fmt.Sprintf("WARNING: possibly leaked static IP: %v\n", ipErr)) - } - - // Logging that the GLBC failed to cleanup GCE resources on ingress deletion - // See kubernetes/ingress#431 - if pollErr != nil { - return fmt.Errorf("error: L7 controller failed to delete all cloud resources on time. 
%v", pollErr) - } - return nil -} - -func (cont *GCEIngressController) getL7AddonUID() (string, error) { - Logf("Retrieving UID from config map: %v/%v", metav1.NamespaceSystem, uidConfigMap) - cm, err := cont.Client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{}) - if err != nil { - return "", err - } - if uid, ok := cm.Data[uidKey]; ok { - return uid, nil - } - return "", fmt.Errorf("Could not find cluster UID for L7 addon pod") -} - -func (cont *GCEIngressController) ListGlobalForwardingRules() []*compute.ForwardingRule { - gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) - fwdList := []*compute.ForwardingRule{} - l, err := gceCloud.ListGlobalForwardingRules() - Expect(err).NotTo(HaveOccurred()) - for _, fwd := range l { - if cont.isOwned(fwd.Name) { - fwdList = append(fwdList, fwd) - } - } - return fwdList -} - -func (cont *GCEIngressController) deleteForwardingRule(del bool) string { - msg := "" - fwList := []compute.ForwardingRule{} - for _, regex := range []string{fmt.Sprintf("%vfw-.*%v.*", k8sPrefix, clusterDelimiter), fmt.Sprintf("%vfws-.*%v.*", k8sPrefix, clusterDelimiter)} { - gcloudComputeResourceList("forwarding-rules", regex, cont.Cloud.ProjectID, &fwList) - if len(fwList) == 0 { - continue - } - for _, f := range fwList { - if !cont.canDelete(f.Name, f.CreationTimestamp, del) { - continue - } - if del { - GcloudComputeResourceDelete("forwarding-rules", f.Name, cont.Cloud.ProjectID, "--global") - } else { - msg += fmt.Sprintf("%v (forwarding rule)\n", f.Name) - } - } - } - return msg -} - -func (cont *GCEIngressController) GetGlobalAddress(ipName string) *compute.Address { - gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) - ip, err := gceCloud.GetGlobalAddress(ipName) - Expect(err).NotTo(HaveOccurred()) - return ip -} - -func (cont *GCEIngressController) deleteAddresses(del bool) string { - msg := "" - ipList := []compute.Address{} - regex := fmt.Sprintf("%vfw-.*%v.*", k8sPrefix, clusterDelimiter) - gcloudComputeResourceList("addresses", regex, cont.Cloud.ProjectID, &ipList) - if len(ipList) != 0 { - for _, ip := range ipList { - if !cont.canDelete(ip.Name, ip.CreationTimestamp, del) { - continue - } - if del { - GcloudComputeResourceDelete("addresses", ip.Name, cont.Cloud.ProjectID, "--global") - } else { - msg += fmt.Sprintf("%v (static-ip)\n", ip.Name) - } - } - } - return msg -} - -func (cont *GCEIngressController) ListTargetHttpProxies() []*compute.TargetHttpProxy { - gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) - tpList := []*compute.TargetHttpProxy{} - l, err := gceCloud.ListTargetHttpProxies() - Expect(err).NotTo(HaveOccurred()) - for _, tp := range l { - if cont.isOwned(tp.Name) { - tpList = append(tpList, tp) - } - } - return tpList -} - -func (cont *GCEIngressController) ListTargetHttpsProxies() []*compute.TargetHttpsProxy { - gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) - tpsList := []*compute.TargetHttpsProxy{} - l, err := gceCloud.ListTargetHttpsProxies() - Expect(err).NotTo(HaveOccurred()) - for _, tps := range l { - if cont.isOwned(tps.Name) { - tpsList = append(tpsList, tps) - } - } - return tpsList -} - -func (cont *GCEIngressController) deleteTargetProxy(del bool) string { - msg := "" - tpList := []compute.TargetHttpProxy{} - regex := fmt.Sprintf("%vtp-.*%v.*", k8sPrefix, clusterDelimiter) - gcloudComputeResourceList("target-http-proxies", regex, cont.Cloud.ProjectID, &tpList) - if len(tpList) != 0 { - for _, t := range tpList { - if !cont.canDelete(t.Name, t.CreationTimestamp, del) { - continue - } 
- if del { - GcloudComputeResourceDelete("target-http-proxies", t.Name, cont.Cloud.ProjectID) - } else { - msg += fmt.Sprintf("%v (target-http-proxy)\n", t.Name) - } - } - } - tpsList := []compute.TargetHttpsProxy{} - regex = fmt.Sprintf("%vtps-.*%v.*", k8sPrefix, clusterDelimiter) - gcloudComputeResourceList("target-https-proxies", regex, cont.Cloud.ProjectID, &tpsList) - if len(tpsList) != 0 { - for _, t := range tpsList { - if !cont.canDelete(t.Name, t.CreationTimestamp, del) { - continue - } - if del { - GcloudComputeResourceDelete("target-https-proxies", t.Name, cont.Cloud.ProjectID) - } else { - msg += fmt.Sprintf("%v (target-https-proxy)\n", t.Name) - } - } - } - return msg -} - -func (cont *GCEIngressController) ListUrlMaps() []*compute.UrlMap { - gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) - umList := []*compute.UrlMap{} - l, err := gceCloud.ListUrlMaps() - Expect(err).NotTo(HaveOccurred()) - for _, um := range l { - if cont.isOwned(um.Name) { - umList = append(umList, um) - } - } - return umList -} - -func (cont *GCEIngressController) deleteURLMap(del bool) (msg string) { - gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) - umList, err := gceCloud.ListUrlMaps() - if err != nil { - if cont.isHTTPErrorCode(err, http.StatusNotFound) { - return msg - } - return fmt.Sprintf("Failed to list url maps: %v", err) - } - if len(umList) == 0 { - return msg - } - for _, um := range umList { - if !cont.canDelete(um.Name, um.CreationTimestamp, del) { - continue - } - if del { - Logf("Deleting url-map: %s", um.Name) - if err := gceCloud.DeleteUrlMap(um.Name); err != nil && - !cont.isHTTPErrorCode(err, http.StatusNotFound) { - msg += fmt.Sprintf("Failed to delete url map %v\n", um.Name) - } - } else { - msg += fmt.Sprintf("%v (url-map)\n", um.Name) - } - } - return msg -} - -func (cont *GCEIngressController) ListGlobalBackendServices() []*compute.BackendService { - gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) - beList := []*compute.BackendService{} - l, err := gceCloud.ListGlobalBackendServices() - Expect(err).NotTo(HaveOccurred()) - for _, be := range l { - if cont.isOwned(be.Name) { - beList = append(beList, be) - } - } - return beList -} - -func (cont *GCEIngressController) deleteBackendService(del bool) (msg string) { - gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) - beList, err := gceCloud.ListGlobalBackendServices() - if err != nil { - if cont.isHTTPErrorCode(err, http.StatusNotFound) { - return msg - } - return fmt.Sprintf("Failed to list backend services: %v", err) - } - if len(beList) == 0 { - Logf("No backend services found") - return msg - } - for _, be := range beList { - if !cont.canDelete(be.Name, be.CreationTimestamp, del) { - continue - } - if del { - Logf("Deleting backed-service: %s", be.Name) - if err := gceCloud.DeleteGlobalBackendService(be.Name); err != nil && - !cont.isHTTPErrorCode(err, http.StatusNotFound) { - msg += fmt.Sprintf("Failed to delete backend service %v: %v\n", be.Name, err) - } - } else { - msg += fmt.Sprintf("%v (backend-service)\n", be.Name) - } - } - return msg -} - -func (cont *GCEIngressController) deleteHTTPHealthCheck(del bool) (msg string) { - gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) - hcList, err := gceCloud.ListHttpHealthChecks() - if err != nil { - if cont.isHTTPErrorCode(err, http.StatusNotFound) { - return msg - } - return fmt.Sprintf("Failed to list HTTP health checks: %v", err) - } - if len(hcList) == 0 { - return msg - } - for _, hc := range hcList { - if !cont.canDelete(hc.Name, hc.CreationTimestamp, 
del) { - continue - } - if del { - Logf("Deleting http-health-check: %s", hc.Name) - if err := gceCloud.DeleteHttpHealthCheck(hc.Name); err != nil && - !cont.isHTTPErrorCode(err, http.StatusNotFound) { - msg += fmt.Sprintf("Failed to delete HTTP health check %v\n", hc.Name) - } - } else { - msg += fmt.Sprintf("%v (http-health-check)\n", hc.Name) - } - } - return msg -} - -func (cont *GCEIngressController) ListSslCertificates() []*compute.SslCertificate { - gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) - sslList := []*compute.SslCertificate{} - l, err := gceCloud.ListSslCertificates() - Expect(err).NotTo(HaveOccurred()) - for _, ssl := range l { - if cont.isOwned(ssl.Name) { - sslList = append(sslList, ssl) - } - } - return sslList -} - -func (cont *GCEIngressController) deleteSSLCertificate(del bool) (msg string) { - gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) - sslList, err := gceCloud.ListSslCertificates() - if err != nil { - if cont.isHTTPErrorCode(err, http.StatusNotFound) { - return msg - } - return fmt.Sprintf("Failed to list ssl certificates: %v", err) - } - if len(sslList) != 0 { - for _, s := range sslList { - if !cont.canDelete(s.Name, s.CreationTimestamp, del) { - continue - } - if del { - Logf("Deleting ssl-certificate: %s", s.Name) - if err := gceCloud.DeleteSslCertificate(s.Name); err != nil && - !cont.isHTTPErrorCode(err, http.StatusNotFound) { - msg += fmt.Sprintf("Failed to delete ssl certificates: %v\n", s.Name) - } - } else { - msg += fmt.Sprintf("%v (ssl-certificate)\n", s.Name) - } - } - } - return msg -} - -func (cont *GCEIngressController) ListInstanceGroups() []*compute.InstanceGroup { - gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) - igList := []*compute.InstanceGroup{} - l, err := gceCloud.ListInstanceGroups(cont.Cloud.Zone) - Expect(err).NotTo(HaveOccurred()) - for _, ig := range l { - if cont.isOwned(ig.Name) { - igList = append(igList, ig) - } - } - return igList -} - -func (cont *GCEIngressController) deleteInstanceGroup(del bool) (msg string) { - gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) - // TODO: E2E cloudprovider has only 1 zone, but the cluster can have many. - // We need to poll on all IGs across all zones. - igList, err := gceCloud.ListInstanceGroups(cont.Cloud.Zone) - if err != nil { - if cont.isHTTPErrorCode(err, http.StatusNotFound) { - return msg - } - return fmt.Sprintf("Failed to list instance groups: %v", err) - } - if len(igList) == 0 { - return msg - } - for _, ig := range igList { - if !cont.canDelete(ig.Name, ig.CreationTimestamp, del) { - continue - } - if del { - Logf("Deleting instance-group: %s", ig.Name) - if err := gceCloud.DeleteInstanceGroup(ig.Name, cont.Cloud.Zone); err != nil && - !cont.isHTTPErrorCode(err, http.StatusNotFound) { - msg += fmt.Sprintf("Failed to delete instance group %v\n", ig.Name) - } - } else { - msg += fmt.Sprintf("%v (instance-group)\n", ig.Name) - } - } - return msg -} - -func (cont *GCEIngressController) deleteNetworkEndpointGroup(del bool) (msg string) { - gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) - // TODO: E2E cloudprovider has only 1 zone, but the cluster can have many. - // We need to poll on all NEGs across all zones. - negList, err := gceCloud.ListNetworkEndpointGroup(cont.Cloud.Zone) - if err != nil { - if cont.isHTTPErrorCode(err, http.StatusNotFound) { - return msg - } - // Do not return error as NEG is still alpha. 
- Logf("Failed to list network endpoint group: %v", err) - return msg - } - if len(negList) == 0 { - return msg - } - for _, neg := range negList { - if !cont.canDeleteNEG(neg.Name, neg.CreationTimestamp, del) { - continue - } - if del { - Logf("Deleting network-endpoint-group: %s", neg.Name) - if err := gceCloud.DeleteNetworkEndpointGroup(neg.Name, cont.Cloud.Zone); err != nil && - !cont.isHTTPErrorCode(err, http.StatusNotFound) { - msg += fmt.Sprintf("Failed to delete network endpoint group %v\n", neg.Name) - } - } else { - msg += fmt.Sprintf("%v (network-endpoint-group)\n", neg.Name) - } - } - return msg -} - -// canDelete returns true if either the name ends in a suffix matching this -// controller's UID, or the creationTimestamp exceeds the maxAge and del is set -// to true. Always returns false if the name doesn't match that we expect for -// Ingress cloud resources. -func (cont *GCEIngressController) canDelete(resourceName, creationTimestamp string, delOldResources bool) bool { - // ignore everything not created by an ingress controller. - splitName := strings.Split(resourceName, clusterDelimiter) - if !strings.HasPrefix(resourceName, k8sPrefix) || len(splitName) != 2 { - return false - } - - // Resources created by the GLBC have a "0"" appended to the end if truncation - // occurred. Removing the zero allows the following match. - truncatedClusterUID := splitName[1] - if len(truncatedClusterUID) >= 1 && strings.HasSuffix(truncatedClusterUID, "0") { - truncatedClusterUID = truncatedClusterUID[:len(truncatedClusterUID)-1] - } - - // always delete things that are created by the current ingress controller. - // Because of resource name truncation, this looks for a common prefix - if strings.HasPrefix(cont.UID, truncatedClusterUID) { - return true - } - if !delOldResources { - return false - } - return canDeleteWithTimestamp(resourceName, creationTimestamp) -} - -// isOwned returns true if the resourceName ends in a suffix matching this -// controller UID. -func (cont *GCEIngressController) isOwned(resourceName string) bool { - return cont.canDelete(resourceName, "", false) -} - -// canDeleteNEG returns true if either the name contains this controller's UID, -// or the creationTimestamp exceeds the maxAge and del is set to true. -func (cont *GCEIngressController) canDeleteNEG(resourceName, creationTimestamp string, delOldResources bool) bool { - if !strings.HasPrefix(resourceName, "k8s") { - return false - } - - if strings.Contains(resourceName, cont.UID) { - return true - } - - if !delOldResources { - return false - } - - return canDeleteWithTimestamp(resourceName, creationTimestamp) -} - -func canDeleteWithTimestamp(resourceName, creationTimestamp string) bool { - createdTime, err := time.Parse(time.RFC3339, creationTimestamp) - if err != nil { - Logf("WARNING: Failed to parse creation timestamp %v for %v: %v", creationTimestamp, resourceName, err) - return false - } - if time.Since(createdTime) > maxAge { - Logf("%v created on %v IS too old", resourceName, creationTimestamp) - return true - } - return false -} - -// GetFirewallRuleName returns the name of the firewall used for the GCEIngressController. -func (cont *GCEIngressController) GetFirewallRuleName() string { - return fmt.Sprintf("%vfw-l7%v%v", k8sPrefix, clusterDelimiter, cont.UID) -} - -// GetFirewallRule returns the firewall used by the GCEIngressController. -// Causes a fatal error incase of an error. 
-// TODO: Rename this to GetFirewallRuleOrDie and similarly rename all other -// methods here to be consistent with rest of the code in this repo. -func (cont *GCEIngressController) GetFirewallRule() *compute.Firewall { - fw, err := cont.GetFirewallRuleOrError() - Expect(err).NotTo(HaveOccurred()) - return fw -} - -// GetFirewallRule returns the firewall used by the GCEIngressController. -// Returns an error if that fails. -// TODO: Rename this to GetFirewallRule when the above method with that name is renamed. -func (cont *GCEIngressController) GetFirewallRuleOrError() (*compute.Firewall, error) { - gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) - fwName := cont.GetFirewallRuleName() - return gceCloud.GetFirewall(fwName) -} - -func (cont *GCEIngressController) deleteFirewallRule(del bool) (msg string) { - fwList := []compute.Firewall{} - regex := fmt.Sprintf("%vfw-l7%v.*", k8sPrefix, clusterDelimiter) - gcloudComputeResourceList("firewall-rules", regex, cont.Cloud.ProjectID, &fwList) - if len(fwList) != 0 { - for _, f := range fwList { - if !cont.canDelete(f.Name, f.CreationTimestamp, del) { - continue - } - if del { - GcloudComputeResourceDelete("firewall-rules", f.Name, cont.Cloud.ProjectID) - } else { - msg += fmt.Sprintf("%v (firewall rule)\n", f.Name) - } - } - } - return msg -} - -func (cont *GCEIngressController) isHTTPErrorCode(err error, code int) bool { - apiErr, ok := err.(*googleapi.Error) - return ok && apiErr.Code == code -} - -// BackendServiceUsingNEG returns true only if all global backend service with matching nodeports pointing to NEG as backend -func (cont *GCEIngressController) BackendServiceUsingNEG(svcPorts map[string]v1.ServicePort) (bool, error) { - return cont.backendMode(svcPorts, "networkEndpointGroups") -} - -// BackendServiceUsingIG returns true only if all global backend service with matching svcPorts pointing to IG as backend -func (cont *GCEIngressController) BackendServiceUsingIG(svcPorts map[string]v1.ServicePort) (bool, error) { - return cont.backendMode(svcPorts, "instanceGroups") -} - -func (cont *GCEIngressController) backendMode(svcPorts map[string]v1.ServicePort, keyword string) (bool, error) { - gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) - beList, err := gceCloud.ListGlobalBackendServices() - if err != nil { - return false, fmt.Errorf("failed to list backend services: %v", err) - } - - hcList, err := gceCloud.ListHealthChecks() - if err != nil { - return false, fmt.Errorf("failed to list health checks: %v", err) - } - - uid := cont.UID - if len(uid) > 8 { - uid = uid[:8] - } - - matchingBackendService := 0 - for svcName, sp := range svcPorts { - match := false - bsMatch := &compute.BackendService{} - // Non-NEG BackendServices are named with the Nodeport in the name. - // NEG BackendServices' names contain the a sha256 hash of a string. 
- negString := strings.Join([]string{uid, cont.Ns, svcName, fmt.Sprintf("%v", sp.Port)}, ";") - negHash := fmt.Sprintf("%x", sha256.Sum256([]byte(negString)))[:8] - for _, bs := range beList { - if strings.Contains(bs.Name, strconv.Itoa(int(sp.NodePort))) || - strings.Contains(bs.Name, negHash) { - match = true - bsMatch = bs - matchingBackendService += 1 - break - } - } - - if match { - for _, be := range bsMatch.Backends { - if !strings.Contains(be.Group, keyword) { - return false, nil - } - } - - // Check that the correct HealthCheck exists for the BackendService - hcMatch := false - for _, hc := range hcList { - if hc.Name == bsMatch.Name { - hcMatch = true - break - } - } - - if !hcMatch { - return false, fmt.Errorf("missing healthcheck for backendservice: %v", bsMatch.Name) - } - } - } - return matchingBackendService == len(svcPorts), nil -} - -// Cleanup cleans up cloud resources. -// If del is false, it simply reports existing resources without deleting them. -// If dle is true, it deletes resources it finds acceptable (see canDelete func). -func (cont *GCEIngressController) Cleanup(del bool) error { - // Ordering is important here because we cannot delete resources that other - // resources hold references to. - errMsg := cont.deleteForwardingRule(del) - // Static IPs are named after forwarding rules. - errMsg += cont.deleteAddresses(del) - - errMsg += cont.deleteTargetProxy(del) - errMsg += cont.deleteURLMap(del) - errMsg += cont.deleteBackendService(del) - errMsg += cont.deleteHTTPHealthCheck(del) - - errMsg += cont.deleteInstanceGroup(del) - errMsg += cont.deleteNetworkEndpointGroup(del) - errMsg += cont.deleteFirewallRule(del) - errMsg += cont.deleteSSLCertificate(del) - - // TODO: Verify instance-groups, issue #16636. Gcloud mysteriously barfs when told - // to unmarshal instance groups into the current vendored gce-client's understanding - // of the struct. - if errMsg == "" { - return nil - } - return fmt.Errorf(errMsg) -} - -// Init initializes the GCEIngressController with an UID -func (cont *GCEIngressController) Init() error { - uid, err := cont.getL7AddonUID() - if err != nil { - return err - } - cont.UID = uid - // There's a name limit imposed by GCE. The controller will truncate. - testName := fmt.Sprintf("k8s-fw-foo-app-X-%v--%v", cont.Ns, cont.UID) - if len(testName) > nameLenLimit { - Logf("WARNING: test name including cluster UID: %v is over the GCE limit of %v", testName, nameLenLimit) - } else { - Logf("Detected cluster UID %v", cont.UID) - } - return nil -} - -// CreateStaticIP allocates a random static ip with the given name. Returns a string -// representation of the ip. Caller is expected to manage cleanup of the ip by -// invoking deleteStaticIPs. 
-func (cont *GCEIngressController) CreateStaticIP(name string) string { - gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) - addr := &compute.Address{Name: name} - if err := gceCloud.ReserveGlobalAddress(addr); err != nil { - if delErr := gceCloud.DeleteGlobalAddress(name); delErr != nil { - if cont.isHTTPErrorCode(delErr, http.StatusNotFound) { - Logf("Static ip with name %v was not allocated, nothing to delete", name) - } else { - Logf("Failed to delete static ip %v: %v", name, delErr) - } - } - Failf("Failed to allocate static ip %v: %v", name, err) - } - - ip, err := gceCloud.GetGlobalAddress(name) - if err != nil { - Failf("Failed to get newly created static ip %v: %v", name, err) - } - - cont.staticIPName = ip.Name - Logf("Reserved static ip %v: %v", cont.staticIPName, ip.Address) - return ip.Address -} - -// deleteStaticIPs delets all static-ips allocated through calls to -// CreateStaticIP. -func (cont *GCEIngressController) deleteStaticIPs() error { - if cont.staticIPName != "" { - if err := GcloudComputeResourceDelete("addresses", cont.staticIPName, cont.Cloud.ProjectID, "--global"); err == nil { - cont.staticIPName = "" - } else { - return err - } - } else { - e2eIPs := []compute.Address{} - gcloudComputeResourceList("addresses", "e2e-.*", cont.Cloud.ProjectID, &e2eIPs) - ips := []string{} - for _, ip := range e2eIPs { - ips = append(ips, ip.Name) - } - Logf("None of the remaining %d static-ips were created by this e2e: %v", len(ips), strings.Join(ips, ", ")) - } - return nil -} - -// gcloudComputeResourceList unmarshals json output of gcloud into given out interface. -func gcloudComputeResourceList(resource, regex, project string, out interface{}) { - // gcloud prints a message to stderr if it has an available update - // so we only look at stdout. - command := []string{ - "compute", resource, "list", - fmt.Sprintf("--filter='name ~ \"%q\"'", regex), - fmt.Sprintf("--project=%v", project), - "-q", "--format=json", - } - output, err := exec.Command("gcloud", command...).Output() - if err != nil { - errCode := -1 - errMsg := "" - if exitErr, ok := err.(utilexec.ExitError); ok { - errCode = exitErr.ExitStatus() - errMsg = exitErr.Error() - if osExitErr, ok := err.(*exec.ExitError); ok { - errMsg = fmt.Sprintf("%v, stderr %v", errMsg, string(osExitErr.Stderr)) - } - } - Logf("Error running gcloud command 'gcloud %s': err: %v, output: %v, status: %d, msg: %v", strings.Join(command, " "), err, string(output), errCode, errMsg) - } - if err := json.Unmarshal([]byte(output), out); err != nil { - Logf("Error unmarshalling gcloud output for %v: %v, output: %v", resource, err, string(output)) - } -} - -// GcloudComputeResourceDelete deletes the specified compute resource by name and project. -func GcloudComputeResourceDelete(resource, name, project string, args ...string) error { - Logf("Deleting %v: %v", resource, name) - argList := append([]string{"compute", resource, "delete", name, fmt.Sprintf("--project=%v", project), "-q"}, args...) - output, err := exec.Command("gcloud", argList...).CombinedOutput() - if err != nil { - Logf("Error deleting %v, output: %v\nerror: %+v", resource, string(output), err) - } - return err -} - -// GcloudComputeResourceCreate creates a compute resource with a name and arguments. -func GcloudComputeResourceCreate(resource, name, project string, args ...string) error { - Logf("Creating %v in project %v: %v", resource, project, name) - argsList := append([]string{"compute", resource, "create", name, fmt.Sprintf("--project=%v", project)}, args...) 
- Logf("Running command: gcloud %+v", strings.Join(argsList, " ")) - output, err := exec.Command("gcloud", argsList...).CombinedOutput() - if err != nil { - Logf("Error creating %v, output: %v\nerror: %+v", resource, string(output), err) - } - return err -} - // IngressTestJig holds the relevant state and parameters of the ingress test. type IngressTestJig struct { Client clientset.Interface @@ -1154,7 +373,7 @@ func NewIngressTestJig(c clientset.Interface) *IngressTestJig { return &IngressTestJig{ Client: c, RootCAs: map[string][]byte{}, - PollInterval: LoadBalancerPollInterval, + PollInterval: framework.LoadBalancerPollInterval, Logger: &E2ELogger{}, } } @@ -1174,28 +393,28 @@ func (j *IngressTestJig) CreateIngress(manifestPath, ns string, ingAnnotations m } j.Logger.Infof("creating replication controller") - RunKubectlOrDieInput(read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) + framework.RunKubectlOrDieInput(read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) j.Logger.Infof("creating service") - RunKubectlOrDieInput(read("svc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) + framework.RunKubectlOrDieInput(read("svc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) if len(svcAnnotations) > 0 { svcList, err := j.Client.CoreV1().Services(ns).List(metav1.ListOptions{}) - ExpectNoError(err) + framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations = svcAnnotations _, err = j.Client.CoreV1().Services(ns).Update(&svc) - ExpectNoError(err) + framework.ExpectNoError(err) } } if exists("secret.yaml") { j.Logger.Infof("creating secret") - RunKubectlOrDieInput(read("secret.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) + framework.RunKubectlOrDieInput(read("secret.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) } j.Logger.Infof("Parsing ingress from %v", filepath.Join(manifestPath, "ing.yaml")) j.Ingress, err = manifest.IngressFromManifest(filepath.Join(manifestPath, "ing.yaml")) - ExpectNoError(err) + framework.ExpectNoError(err) j.Ingress.Namespace = ns j.Ingress.Annotations = map[string]string{IngressClassKey: j.Class} for k, v := range ingAnnotations { @@ -1203,7 +422,7 @@ func (j *IngressTestJig) CreateIngress(manifestPath, ns string, ingAnnotations m } j.Logger.Infof(fmt.Sprintf("creating " + j.Ingress.Name + " ingress")) j.Ingress, err = j.runCreate(j.Ingress) - ExpectNoError(err) + framework.ExpectNoError(err) } // runCreate runs the required command to create the given ingress. @@ -1212,11 +431,11 @@ func (j *IngressTestJig) runCreate(ing *extensions.Ingress) (*extensions.Ingress return j.Client.ExtensionsV1beta1().Ingresses(ing.Namespace).Create(ing) } // Use kubemci to create a multicluster ingress. - filePath := TestContext.OutputDir + "/mci.yaml" + filePath := framework.TestContext.OutputDir + "/mci.yaml" if err := manifest.IngressToManifest(ing, filePath); err != nil { return nil, err } - _, err := RunKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath)) + _, err := framework.RunKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath)) return ing, err } @@ -1227,11 +446,11 @@ func (j *IngressTestJig) runUpdate(ing *extensions.Ingress) (*extensions.Ingress } // Use kubemci to update a multicluster ingress. // kubemci does not have an update command. We use "create --force" to update an existing ingress. 
- filePath := TestContext.OutputDir + "/mci.yaml" + filePath := framework.TestContext.OutputDir + "/mci.yaml" if err := manifest.IngressToManifest(ing, filePath); err != nil { return nil, err } - _, err := RunKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath), "--force") + _, err := framework.RunKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath), "--force") return ing, err } @@ -1242,19 +461,19 @@ func (j *IngressTestJig) Update(update func(ing *extensions.Ingress)) { for i := 0; i < 3; i++ { j.Ingress, err = j.Client.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) if err != nil { - Failf("failed to get ingress %s/%s: %v", ns, name, err) + framework.Failf("failed to get ingress %s/%s: %v", ns, name, err) } update(j.Ingress) j.Ingress, err = j.runUpdate(j.Ingress) if err == nil { - DescribeIng(j.Ingress.Namespace) + framework.DescribeIng(j.Ingress.Namespace) return } if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) { - Failf("failed to update ingress %s/%s: %v", ns, name, err) + framework.Failf("failed to update ingress %s/%s: %v", ns, name, err) } } - Failf("too many retries updating ingress %s/%s", ns, name) + framework.Failf("too many retries updating ingress %s/%s", ns, name) } // AddHTTPS updates the ingress to add this secret for these hosts. @@ -1262,7 +481,7 @@ func (j *IngressTestJig) AddHTTPS(secretName string, hosts ...string) { // TODO: Just create the secret in GetRootCAs once we're watching secrets in // the ingress controller. _, cert, _, err := createTLSSecret(j.Client, j.Ingress.Namespace, secretName, hosts...) - ExpectNoError(err) + framework.ExpectNoError(err) j.Logger.Infof("Updating ingress %v to also use secret %v for TLS termination", j.Ingress.Name, secretName) j.Update(func(ing *extensions.Ingress) { ing.Spec.TLS = append(ing.Spec.TLS, extensions.IngressTLS{Hosts: hosts, SecretName: secretName}) @@ -1273,7 +492,7 @@ func (j *IngressTestJig) AddHTTPS(secretName string, hosts ...string) { // SetHTTPS updates the ingress to use only this secret for these hosts. func (j *IngressTestJig) SetHTTPS(secretName string, hosts ...string) { _, cert, _, err := createTLSSecret(j.Client, j.Ingress.Namespace, secretName, hosts...) - ExpectNoError(err) + framework.ExpectNoError(err) j.Logger.Infof("Updating ingress %v to only use secret %v for TLS termination", j.Ingress.Name, secretName) j.Update(func(ing *extensions.Ingress) { ing.Spec.TLS = []extensions.IngressTLS{{Hosts: hosts, SecretName: secretName}} @@ -1312,7 +531,7 @@ func (j *IngressTestJig) GetRootCA(secretName string) (rootCA []byte) { var ok bool rootCA, ok = j.RootCAs[secretName] if !ok { - Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName) + framework.Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName) } return } @@ -1341,11 +560,11 @@ func (j *IngressTestJig) runDelete(ing *extensions.Ingress) error { return j.Client.ExtensionsV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil) } // Use kubemci to delete a multicluster ingress. 
- filePath := TestContext.OutputDir + "/mci.yaml" + filePath := framework.TestContext.OutputDir + "/mci.yaml" if err := manifest.IngressToManifest(ing, filePath); err != nil { return err } - _, err := RunKubemciWithKubeconfig("delete", ing.Name, fmt.Sprintf("--ingress=%s", filePath)) + _, err := framework.RunKubemciWithKubeconfig("delete", ing.Name, fmt.Sprintf("--ingress=%s", filePath)) return err } @@ -1353,7 +572,7 @@ func (j *IngressTestJig) runDelete(ing *extensions.Ingress) error { // TODO(nikhiljindal): Update this to be able to return hostname as well. func getIngressAddressFromKubemci(name string) ([]string, error) { var addresses []string - out, err := RunKubemciCmd("get-status", name) + out, err := framework.RunKubemciCmd("get-status", name) if err != nil { return addresses, err } @@ -1438,7 +657,7 @@ func (j *IngressTestJig) pollIngressWithCert(ing *extensions.Ingress, address st } route := fmt.Sprintf("%v://%v%v", proto, address, p.Path) j.Logger.Infof("Testing route %v host %v with simple GET", route, rules.Host) - if err := PollURL(route, rules.Host, timeout, j.PollInterval, timeoutClient, false); err != nil { + if err := framework.PollURL(route, rules.Host, timeout, j.PollInterval, timeoutClient, false); err != nil { return err } } @@ -1448,8 +667,8 @@ func (j *IngressTestJig) pollIngressWithCert(ing *extensions.Ingress, address st } func (j *IngressTestJig) WaitForIngress(waitForNodePort bool) { - if err := j.WaitForGivenIngressWithTimeout(j.Ingress, waitForNodePort, LoadBalancerPollTimeout); err != nil { - Failf("error in waiting for ingress to get an address: %s", err) + if err := j.WaitForGivenIngressWithTimeout(j.Ingress, waitForNodePort, framework.LoadBalancerPollTimeout); err != nil { + framework.Failf("error in waiting for ingress to get an address: %s", err) } } @@ -1481,21 +700,21 @@ func (j *IngressTestJig) WaitForGivenIngressWithTimeout(ing *extensions.Ingress, // Ingress. Hostnames and certificate need to be explicitly passed in. func (j *IngressTestJig) WaitForIngressWithCert(waitForNodePort bool, knownHosts []string, cert []byte) error { // Wait for the loadbalancer IP. - address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, LoadBalancerPollTimeout) + address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, framework.LoadBalancerPollTimeout) if err != nil { - return fmt.Errorf("Ingress failed to acquire an IP address within %v", LoadBalancerPollTimeout) + return fmt.Errorf("Ingress failed to acquire an IP address within %v", framework.LoadBalancerPollTimeout) } - return j.pollIngressWithCert(j.Ingress, address, knownHosts, cert, waitForNodePort, LoadBalancerPollTimeout) + return j.pollIngressWithCert(j.Ingress, address, knownHosts, cert, waitForNodePort, framework.LoadBalancerPollTimeout) } // VerifyURL polls for the given iterations, in intervals, and fails if the // given url returns a non-healthy http code even once. 
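A hedged usage sketch for VerifyURL, assuming a jig whose Address has already been populated by WaitForIngress; the host and iteration count are illustrative:

// Poll an HTTPS route ten times through a client that skips certificate
// verification (the jig's certs are self-signed) and fail on any
// non-healthy status code.
client := BuildInsecureClient(IngressReqTimeout)
route := fmt.Sprintf("https://%v/", jig.Address)
framework.ExpectNoError(jig.VerifyURL(route, "test.example.com", 10, time.Second, client))
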
func (j *IngressTestJig) VerifyURL(route, host string, iterations int, interval time.Duration, httpClient *http.Client) error { for i := 0; i < iterations; i++ { - b, err := SimpleGET(httpClient, route, host) + b, err := framework.SimpleGET(httpClient, route, host) if err != nil { - Logf(b) + framework.Logf(b) return err } j.Logger.Infof("Verfied %v with host %v %d times, sleeping for %v", route, host, i, interval) @@ -1506,11 +725,11 @@ func (j *IngressTestJig) VerifyURL(route, host string, iterations int, interval func (j *IngressTestJig) pollServiceNodePort(ns, name string, port int) error { // TODO: Curl all nodes? - u, err := GetNodePortURL(j.Client, ns, name, port) + u, err := framework.GetNodePortURL(j.Client, ns, name, port) if err != nil { return err } - return PollURL(u, "", 30*time.Second, j.PollInterval, &http.Client{Timeout: IngressReqTimeout}, false) + return framework.PollURL(u, "", 30*time.Second, j.PollInterval, &http.Client{Timeout: IngressReqTimeout}, false) } func (j *IngressTestJig) GetDefaultBackendNodePort() (int32, error) { @@ -1562,12 +781,12 @@ func (j *IngressTestJig) GetServicePorts(includeDefaultBackend bool) map[string] } // ConstructFirewallForIngress returns the expected GCE firewall rule for the ingress resource -func (j *IngressTestJig) ConstructFirewallForIngress(gceController *GCEIngressController, nodeTags []string) *compute.Firewall { +func (j *IngressTestJig) ConstructFirewallForIngress(firewallRuleName string, nodeTags []string) *compute.Firewall { nodePorts := j.GetIngressNodePorts(true) fw := compute.Firewall{} - fw.Name = gceController.GetFirewallRuleName() - fw.SourceRanges = gcecloud.LoadBalancerSrcRanges() + fw.Name = firewallRuleName + fw.SourceRanges = framework.TestContext.CloudConfig.Provider.LoadBalancerSrcRanges() fw.TargetTags = nodeTags fw.Allowed = []*compute.FirewallAllowed{ { @@ -1581,16 +800,16 @@ func (j *IngressTestJig) ConstructFirewallForIngress(gceController *GCEIngressCo // GetDistinctResponseFromIngress tries GET call to the ingress VIP and return all distinct responses. func (j *IngressTestJig) GetDistinctResponseFromIngress() (sets.String, error) { // Wait for the loadbalancer IP. - address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, LoadBalancerPollTimeout) + address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, framework.LoadBalancerPollTimeout) if err != nil { - Failf("Ingress failed to acquire an IP address within %v", LoadBalancerPollTimeout) + framework.Failf("Ingress failed to acquire an IP address within %v", framework.LoadBalancerPollTimeout) } responses := sets.NewString() timeoutClient := &http.Client{Timeout: IngressReqTimeout} for i := 0; i < 100; i++ { url := fmt.Sprintf("http://%v", address) - res, err := SimpleGET(timeoutClient, url, "") + res, err := framework.SimpleGET(timeoutClient, url, "") if err != nil { j.Logger.Errorf("Failed to GET %q. 
Got responses: %q: %v", url, res, err) return responses, err @@ -1614,25 +833,25 @@ func (cont *NginxIngressController) Init() { read := func(file string) string { return string(testfiles.ReadOrDie(filepath.Join(IngressManifestPath, "nginx", file), Fail)) } - Logf("initializing nginx ingress controller") - RunKubectlOrDieInput(read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", cont.Ns)) + framework.Logf("initializing nginx ingress controller") + framework.RunKubectlOrDieInput(read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", cont.Ns)) rc, err := cont.Client.CoreV1().ReplicationControllers(cont.Ns).Get("nginx-ingress-controller", metav1.GetOptions{}) - ExpectNoError(err) + framework.ExpectNoError(err) cont.rc = rc - Logf("waiting for pods with label %v", rc.Spec.Selector) + framework.Logf("waiting for pods with label %v", rc.Spec.Selector) sel := labels.SelectorFromSet(labels.Set(rc.Spec.Selector)) - ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.Client, cont.Ns, sel)) + framework.ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.Client, cont.Ns, sel)) pods, err := cont.Client.CoreV1().Pods(cont.Ns).List(metav1.ListOptions{LabelSelector: sel.String()}) - ExpectNoError(err) + framework.ExpectNoError(err) if len(pods.Items) == 0 { - Failf("Failed to find nginx ingress controller pods with selector %v", sel) + framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel) } cont.pod = &pods.Items[0] - cont.externalIP, err = GetHostExternalAddress(cont.Client, cont.pod) - ExpectNoError(err) - Logf("ingress controller running in pod %v on ip %v", cont.pod.Name, cont.externalIP) + cont.externalIP, err = framework.GetHostExternalAddress(cont.Client, cont.pod) + framework.ExpectNoError(err) + framework.Logf("ingress controller running in pod %v on ip %v", cont.pod.Name, cont.externalIP) } func generateBacksideHTTPSIngressSpec(ns string) *extensions.Ingress { diff --git a/test/e2e/framework/provider.go b/test/e2e/framework/provider.go new file mode 100644 index 00000000000..feef1ca3377 --- /dev/null +++ b/test/e2e/framework/provider.go @@ -0,0 +1,139 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "fmt" + "sync" + + "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" +) + +type Factory func() (ProviderInterface, error) + +var ( + providers = make(map[string]Factory) + mutex sync.Mutex +) + +// RegisterProvider is expected to be called during application init, +// typically by an init function in a provider package. +func RegisterProvider(name string, factory Factory) { + mutex.Lock() + defer mutex.Unlock() + if _, ok := providers[name]; ok { + panic(fmt.Sprintf("provider %s already registered", name)) + } + providers[name] = factory +} + +func init() { + // "local" can always be used. + RegisterProvider("local", func() (ProviderInterface, error) { + return NullProvider{}, nil + }) + // The empty string also works, but triggers a warning. 
+ RegisterProvider("", func() (ProviderInterface, error) { + Logf("The --provider flag is not set. Treating as a conformance test. Some tests may not be run.") + return NullProvider{}, nil + }) +} + +// SetupProviderConfig validates the chosen provider and creates +// an interface instance for it. +func SetupProviderConfig(providerName string) (ProviderInterface, error) { + var err error + + mutex.Lock() + defer mutex.Unlock() + factory, ok := providers[providerName] + if !ok { + return nil, fmt.Errorf("The provider %s is unknown.", providerName) + } + provider, err := factory() + + return provider, err +} + +// ProviderInterface contains the implementation for certain +// provider-specific functionality. +type ProviderInterface interface { + FrameworkBeforeEach(f *Framework) + FrameworkAfterEach(f *Framework) + + ResizeGroup(group string, size int32) error + GetGroupNodes(group string) ([]string, error) + GroupSize(group string) (int, error) + + CreatePD(zone string) (string, error) + DeletePD(pdName string) error + CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) + DeletePVSource(pvSource *v1.PersistentVolumeSource) error + + CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) + + EnsureLoadBalancerResourcesDeleted(ip, portRange string) error + LoadBalancerSrcRanges() []string + EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service)) +} + +// NullProvider is the default implementation of the ProviderInterface +// which doesn't do anything. +type NullProvider struct{} + +func (n NullProvider) FrameworkBeforeEach(f *Framework) {} +func (n NullProvider) FrameworkAfterEach(f *Framework) {} + +func (n NullProvider) ResizeGroup(string, int32) error { + return fmt.Errorf("Provider does not support InstanceGroups") +} +func (n NullProvider) GetGroupNodes(group string) ([]string, error) { + return nil, fmt.Errorf("provider does not support InstanceGroups") +} +func (n NullProvider) GroupSize(group string) (int, error) { + return -1, fmt.Errorf("provider does not support InstanceGroups") +} + +func (n NullProvider) CreatePD(zone string) (string, error) { + return "", fmt.Errorf("provider does not support volume creation") +} +func (n NullProvider) DeletePD(pdName string) error { + return fmt.Errorf("provider does not support volume deletion") +} +func (n NullProvider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) { + return nil, fmt.Errorf("Provider not supported") +} +func (n NullProvider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error { + return fmt.Errorf("Provider not supported") +} + +func (n NullProvider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) { +} + +func (n NullProvider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) error { + return nil +} +func (n NullProvider) LoadBalancerSrcRanges() []string { + return nil +} +func (n NullProvider) EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service)) { + nop := func(svc *v1.Service) {} + return nop, nop +} + +var _ ProviderInterface = NullProvider{} diff --git a/test/e2e/framework/providers/aws/aws.go b/test/e2e/framework/providers/aws/aws.go new file mode 100644 index 00000000000..9de99045453 --- /dev/null +++ b/test/e2e/framework/providers/aws/aws.go @@ -0,0 +1,129 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package aws + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go/service/ec2" + + "k8s.io/api/core/v1" + awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" + "k8s.io/kubernetes/test/e2e/framework" +) + +func init() { + framework.RegisterProvider("aws", NewProvider) +} + +func NewProvider() (framework.ProviderInterface, error) { + if framework.TestContext.CloudConfig.Zone == "" { + return nil, fmt.Errorf("gce-zone must be specified for AWS") + } + return &Provider{}, nil +} + +type Provider struct { + framework.NullProvider +} + +func (p *Provider) ResizeGroup(group string, size int32) error { + client := autoscaling.New(session.New()) + return awscloud.ResizeInstanceGroup(client, group, int(size)) +} + +func (p *Provider) GroupSize(group string) (int, error) { + client := autoscaling.New(session.New()) + instanceGroup, err := awscloud.DescribeInstanceGroup(client, group) + if err != nil { + return -1, fmt.Errorf("error describing instance group: %v", err) + } + if instanceGroup == nil { + return -1, fmt.Errorf("instance group not found: %s", group) + } + return instanceGroup.CurrentSize() +} + +func (p *Provider) CreatePD(zone string) (string, error) { + client := newAWSClient(zone) + request := &ec2.CreateVolumeInput{} + request.AvailabilityZone = aws.String(zone) + request.Size = aws.Int64(10) + request.VolumeType = aws.String(awscloud.DefaultVolumeType) + response, err := client.CreateVolume(request) + if err != nil { + return "", err + } + + az := aws.StringValue(response.AvailabilityZone) + awsID := aws.StringValue(response.VolumeId) + + volumeName := "aws://" + az + "/" + awsID + return volumeName, nil +} + +func (p *Provider) DeletePD(pdName string) error { + client := newAWSClient("") + + tokens := strings.Split(pdName, "/") + awsVolumeID := tokens[len(tokens)-1] + + request := &ec2.DeleteVolumeInput{VolumeId: aws.String(awsVolumeID)} + _, err := client.DeleteVolume(request) + if err != nil { + if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" { + framework.Logf("volume deletion implicitly succeeded because volume %q does not exist.", pdName) + } else { + return fmt.Errorf("error deleting EBS volumes: %v", err) + } + } + return nil +} + +func (p *Provider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) { + return &v1.PersistentVolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ + VolumeID: diskName, + FSType: "ext3", + }, + }, nil +} + +func (p *Provider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error { + return framework.DeletePDWithRetry(pvSource.AWSElasticBlockStore.VolumeID) +} + +func newAWSClient(zone string) *ec2.EC2 { + var cfg *aws.Config + + if zone == "" { + zone = framework.TestContext.CloudConfig.Zone + } + if zone == "" { + framework.Logf("Warning: No AWS zone configured!") + cfg = nil + } else { + region := zone[:len(zone)-1] + cfg = &aws.Config{Region: aws.String(region)} + } + 
return ec2.New(session.New(), cfg) +} diff --git a/test/e2e/framework/providers/azure/azure.go b/test/e2e/framework/providers/azure/azure.go new file mode 100644 index 00000000000..e3f61606a8e --- /dev/null +++ b/test/e2e/framework/providers/azure/azure.go @@ -0,0 +1,80 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "fmt" + "os" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" + "k8s.io/kubernetes/test/e2e/framework" +) + +func init() { + framework.RegisterProvider("azure", NewProvider) +} + +func NewProvider() (framework.ProviderInterface, error) { + if framework.TestContext.CloudConfig.ConfigFile == "" { + return nil, fmt.Errorf("config-file must be specified for Azure") + } + config, err := os.Open(framework.TestContext.CloudConfig.ConfigFile) + if err != nil { + framework.Logf("Couldn't open cloud provider configuration %s: %#v", + framework.TestContext.CloudConfig.ConfigFile, err) + } + defer config.Close() + azureCloud, err := azure.NewCloud(config) + return &Provider{ + azureCloud: azureCloud.(*azure.Cloud), + }, err +} + +type Provider struct { + framework.NullProvider + + azureCloud *azure.Cloud +} + +func (p *Provider) CreatePD(zone string) (string, error) { + pdName := fmt.Sprintf("%s-%s", framework.TestContext.Prefix, string(uuid.NewUUID())) + _, diskURI, _, err := p.azureCloud.CreateVolume(pdName, "" /* account */, "" /* sku */, "" /* location */, 1 /* sizeGb */) + if err != nil { + return "", err + } + return diskURI, nil +} + +func (p *Provider) DeletePD(pdName string) error { + if err := p.azureCloud.DeleteVolume(pdName); err != nil { + framework.Logf("failed to delete Azure volume %q: %v", pdName, err) + return err + } + return nil +} + +func (p *Provider) EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service)) { + enable = func(svc *v1.Service) { + svc.ObjectMeta.Annotations = map[string]string{azure.ServiceAnnotationLoadBalancerInternal: "true"} + } + disable = func(svc *v1.Service) { + svc.ObjectMeta.Annotations = map[string]string{azure.ServiceAnnotationLoadBalancerInternal: "false"} + } + return +} diff --git a/test/e2e/framework/firewall_util.go b/test/e2e/framework/providers/gce/firewall.go similarity index 87% rename from test/e2e/framework/firewall_util.go rename to test/e2e/framework/providers/gce/firewall.go index 43dc93cbe21..6894cb36b9a 100644 --- a/test/e2e/framework/firewall_util.go +++ b/test/e2e/framework/providers/gce/firewall.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package framework +package gce import ( "fmt" @@ -23,15 +23,16 @@ import ( "strings" "time" + compute "google.golang.org/api/compute/v1" + gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - clientset "k8s.io/client-go/kubernetes" cloudprovider "k8s.io/cloud-provider" - gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/gomega" - compute "google.golang.org/api/compute/v1" ) const ( @@ -51,7 +52,7 @@ func MakeFirewallNameForLBService(name string) string { // ConstructFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service func ConstructFirewallForLBService(svc *v1.Service, nodeTag string) *compute.Firewall { if svc.Spec.Type != v1.ServiceTypeLoadBalancer { - Failf("can not construct firewall rule for non-loadbalancer type service") + framework.Failf("can not construct firewall rule for non-loadbalancer type service") } fw := compute.Firewall{} fw.Name = MakeFirewallNameForLBService(cloudprovider.DefaultLoadBalancerName(svc)) @@ -77,7 +78,7 @@ func MakeHealthCheckFirewallNameForLBService(clusterID, name string, isNodesHeal // ConstructHealthCheckFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service, nodeTag string, isNodesHealthCheck bool) *compute.Firewall { if svc.Spec.Type != v1.ServiceTypeLoadBalancer { - Failf("can not construct firewall rule for non-loadbalancer type service") + framework.Failf("can not construct firewall rule for non-loadbalancer type service") } fw := compute.Firewall{} fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), isNodesHealthCheck) @@ -96,42 +97,6 @@ func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service, return &fw } -// GetInstanceTags gets tags from GCE instance with given name. -func GetInstanceTags(cloudConfig CloudConfig, instanceName string) *compute.Tags { - gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud) - res, err := gceCloud.ComputeServices().GA.Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone, - instanceName).Do() - if err != nil { - Failf("Failed to get instance tags for %v: %v", instanceName, err) - } - return res.Tags -} - -// SetInstanceTags sets tags on GCE instance with given name. -func SetInstanceTags(cloudConfig CloudConfig, instanceName, zone string, tags []string) []string { - gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud) - // Re-get instance everytime because we need the latest fingerprint for updating metadata - resTags := GetInstanceTags(cloudConfig, instanceName) - _, err := gceCloud.ComputeServices().GA.Instances.SetTags( - cloudConfig.ProjectID, zone, instanceName, - &compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do() - if err != nil { - Failf("failed to set instance tags: %v", err) - } - Logf("Sent request to set tags %v on instance: %v", tags, instanceName) - return resTags.Items -} - -// GetNodeTags gets k8s node tag from one of the nodes -func GetNodeTags(c clientset.Interface, cloudConfig CloudConfig) []string { - nodes := GetReadySchedulableNodesOrDie(c) - if len(nodes.Items) == 0 { - Logf("GetNodeTags: Found 0 node.") - return []string{} - } - return GetInstanceTags(cloudConfig, nodes.Items[0].Name).Items -} - // GetInstancePrefix returns the INSTANCE_PREFIX env we set for e2e cluster. 
// From cluster/gce/config-test.sh, master name is set up using below format: // MASTER_NAME="${INSTANCE_PREFIX}-master" @@ -437,7 +402,7 @@ func VerifyFirewallRule(res, exp *compute.Firewall, network string, portsSubset } func WaitForFirewallRule(gceCloud *gcecloud.GCECloud, fwName string, exist bool, timeout time.Duration) (*compute.Firewall, error) { - Logf("Waiting up to %v for firewall %v exist=%v", timeout, fwName, exist) + framework.Logf("Waiting up to %v for firewall %v exist=%v", timeout, fwName, exist) var fw *compute.Firewall var err error diff --git a/test/e2e/framework/firewall_util_test.go b/test/e2e/framework/providers/gce/firewall_test.go similarity index 98% rename from test/e2e/framework/firewall_util_test.go rename to test/e2e/framework/providers/gce/firewall_test.go index 06dac516f40..647441dc962 100644 --- a/test/e2e/framework/firewall_util_test.go +++ b/test/e2e/framework/providers/gce/firewall_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package framework +package gce import "testing" diff --git a/test/e2e/framework/providers/gce/gce.go b/test/e2e/framework/providers/gce/gce.go new file mode 100644 index 00000000000..29da3e4d796 --- /dev/null +++ b/test/e2e/framework/providers/gce/gce.go @@ -0,0 +1,376 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gce + +import ( + "fmt" + "net/http" + "os/exec" + "regexp" + "strings" + "time" + + compute "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" + "k8s.io/kubernetes/test/e2e/framework" +) + +func init() { + framework.RegisterProvider("gce", factory) + framework.RegisterProvider("gke", factory) +} + +func factory() (framework.ProviderInterface, error) { + framework.Logf("Fetching cloud provider for %q\r", framework.TestContext.Provider) + zone := framework.TestContext.CloudConfig.Zone + region := framework.TestContext.CloudConfig.Region + + var err error + if region == "" { + region, err = gcecloud.GetGCERegion(zone) + if err != nil { + return nil, fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err) + } + } + managedZones := []string{} // Manage all zones in the region + if !framework.TestContext.CloudConfig.MultiZone { + managedZones = []string{zone} + } + + gceCloud, err := gcecloud.CreateGCECloud(&gcecloud.CloudConfig{ + ApiEndpoint: framework.TestContext.CloudConfig.ApiEndpoint, + ProjectID: framework.TestContext.CloudConfig.ProjectID, + Region: region, + Zone: zone, + ManagedZones: managedZones, + NetworkName: "", // TODO: Change this to use framework.TestContext.CloudConfig.Network? 
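+		// The fields below are left at their zero values on purpose; the
+		// GCE client is expected to substitute its own defaults for them
+		// (an assumption, per the TODO above).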
+ SubnetworkName: "", + NodeTags: nil, + NodeInstancePrefix: "", + TokenSource: nil, + UseMetadataServer: false, + AlphaFeatureGate: gcecloud.NewAlphaFeatureGate([]string{}), + }) + + if err != nil { + return nil, fmt.Errorf("Error building GCE/GKE provider: %v", err) + } + + // Arbitrarily pick one of the zones we have nodes in + if framework.TestContext.CloudConfig.Zone == "" && framework.TestContext.CloudConfig.MultiZone { + zones, err := gceCloud.GetAllZonesFromCloudProvider() + if err != nil { + return nil, err + } + + framework.TestContext.CloudConfig.Zone, _ = zones.PopAny() + } + + return NewProvider(gceCloud), nil +} + +func NewProvider(gceCloud *gcecloud.GCECloud) framework.ProviderInterface { + return &Provider{ + gceCloud: gceCloud, + } +} + +type Provider struct { + framework.NullProvider + gceCloud *gcecloud.GCECloud +} + +func (p *Provider) ResizeGroup(group string, size int32) error { + // TODO: make this hit the compute API directly instead of shelling out to gcloud. + // TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic + zone, err := getGCEZoneForGroup(group) + if err != nil { + return err + } + output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "resize", + group, fmt.Sprintf("--size=%v", size), + "--project="+framework.TestContext.CloudConfig.ProjectID, "--zone="+zone).CombinedOutput() + if err != nil { + return fmt.Errorf("Failed to resize node instance group %s: %s", group, output) + } + return nil +} + +func (p *Provider) GetGroupNodes(group string) ([]string, error) { + // TODO: make this hit the compute API directly instead of shelling out to gcloud. + // TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic + zone, err := getGCEZoneForGroup(group) + if err != nil { + return nil, err + } + output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", + "list-instances", group, "--project="+framework.TestContext.CloudConfig.ProjectID, + "--zone="+zone).CombinedOutput() + if err != nil { + return nil, fmt.Errorf("Failed to get nodes in instance group %s: %s", group, output) + } + re := regexp.MustCompile(".*RUNNING") + lines := re.FindAllString(string(output), -1) + for i, line := range lines { + lines[i] = line[:strings.Index(line, " ")] + } + return lines, nil +} + +func (p *Provider) GroupSize(group string) (int, error) { + // TODO: make this hit the compute API directly instead of shelling out to gcloud. 
+ // TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic + zone, err := getGCEZoneForGroup(group) + if err != nil { + return -1, err + } + output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", + "list-instances", group, "--project="+framework.TestContext.CloudConfig.ProjectID, + "--zone="+zone).CombinedOutput() + if err != nil { + return -1, fmt.Errorf("Failed to get group size for group %s: %s", group, output) + } + re := regexp.MustCompile("RUNNING") + return len(re.FindAllString(string(output), -1)), nil +} + +func (p *Provider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) error { + project := framework.TestContext.CloudConfig.ProjectID + region, err := gcecloud.GetGCERegion(framework.TestContext.CloudConfig.Zone) + if err != nil { + return fmt.Errorf("could not get region for zone %q: %v", framework.TestContext.CloudConfig.Zone, err) + } + + return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) { + service := p.gceCloud.ComputeServices().GA + list, err := service.ForwardingRules.List(project, region).Do() + if err != nil { + return false, err + } + for _, item := range list.Items { + if item.PortRange == portRange && item.IPAddress == ip { + framework.Logf("found a load balancer: %v", item) + return false, nil + } + } + return true, nil + }) +} + +func getGCEZoneForGroup(group string) (string, error) { + zone := framework.TestContext.CloudConfig.Zone + if framework.TestContext.CloudConfig.MultiZone { + output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "list", + "--project="+framework.TestContext.CloudConfig.ProjectID, "--format=value(zone)", "--filter=name="+group).CombinedOutput() + if err != nil { + return "", fmt.Errorf("Failed to get zone for node group %s: %s", group, output) + } + zone = strings.TrimSpace(string(output)) + } + return zone, nil +} + +func (p *Provider) CreatePD(zone string) (string, error) { + pdName := fmt.Sprintf("%s-%s", framework.TestContext.Prefix, string(uuid.NewUUID())) + + if zone == "" && framework.TestContext.CloudConfig.MultiZone { + zones, err := p.gceCloud.GetAllZonesFromCloudProvider() + if err != nil { + return "", err + } + zone, _ = zones.PopAny() + } + + tags := map[string]string{} + if err := p.gceCloud.CreateDisk(pdName, gcecloud.DiskTypeStandard, zone, 2 /* sizeGb */, tags); err != nil { + return "", err + } + return pdName, nil +} + +func (p *Provider) DeletePD(pdName string) error { + err := p.gceCloud.DeleteDisk(pdName) + + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && len(gerr.Errors) > 0 && gerr.Errors[0].Reason == "notFound" { + // PD already exists, ignore error. + return nil + } + + framework.Logf("error deleting PD %q: %v", pdName, err) + } + return err +} + +func (p *Provider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) { + return &v1.PersistentVolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ + PDName: diskName, + FSType: "ext3", + ReadOnly: false, + }, + }, nil +} + +func (p *Provider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error { + return framework.DeletePDWithRetry(pvSource.GCEPersistentDisk.PDName) +} + +// CleanupResources cleans up GCE Service Type=LoadBalancer resources with +// the given name. The name is usually the UUID of the Service prefixed with an +// alpha-numeric character ('a') to work around cloudprovider rules. 
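+//
+// A sketch of a typical call after a LoadBalancer Service test (the exact
+// call site in the framework may differ):
+//
+//	lbName := cloudprovider.DefaultLoadBalancerName(svc)
+//	framework.TestContext.CloudConfig.Provider.CleanupServiceResources(cs, lbName, region, zone)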
+func (p *Provider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) { + if pollErr := wait.Poll(5*time.Second, framework.LoadBalancerCleanupTimeout, func() (bool, error) { + if err := p.cleanupGCEResources(c, loadBalancerName, region, zone); err != nil { + framework.Logf("Still waiting for glbc to cleanup: %v", err) + return false, nil + } + return true, nil + }); pollErr != nil { + framework.Failf("Failed to cleanup service GCE resources.") + } +} + +func (p *Provider) cleanupGCEResources(c clientset.Interface, loadBalancerName, region, zone string) (retErr error) { + if region == "" { + // Attempt to parse region from zone if no region is given. + var err error + region, err = gcecloud.GetGCERegion(zone) + if err != nil { + return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err) + } + } + if err := p.gceCloud.DeleteFirewall(gcecloud.MakeFirewallName(loadBalancerName)); err != nil && + !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) { + retErr = err + } + if err := p.gceCloud.DeleteRegionForwardingRule(loadBalancerName, region); err != nil && + !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) { + retErr = fmt.Errorf("%v\n%v", retErr, err) + + } + if err := p.gceCloud.DeleteRegionAddress(loadBalancerName, region); err != nil && + !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) { + retErr = fmt.Errorf("%v\n%v", retErr, err) + } + clusterID, err := GetClusterID(c) + if err != nil { + retErr = fmt.Errorf("%v\n%v", retErr, err) + return + } + hcNames := []string{gcecloud.MakeNodesHealthCheckName(clusterID)} + hc, getErr := p.gceCloud.GetHttpHealthCheck(loadBalancerName) + if getErr != nil && !IsGoogleAPIHTTPErrorCode(getErr, http.StatusNotFound) { + retErr = fmt.Errorf("%v\n%v", retErr, getErr) + return + } + if hc != nil { + hcNames = append(hcNames, hc.Name) + } + if err := p.gceCloud.DeleteExternalTargetPoolAndChecks(&v1.Service{}, loadBalancerName, region, clusterID, hcNames...); err != nil && + !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) { + retErr = fmt.Errorf("%v\n%v", retErr, err) + } + return +} + +func (p *Provider) LoadBalancerSrcRanges() []string { + return gcecloud.LoadBalancerSrcRanges() +} + +func (p *Provider) EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service)) { + enable = func(svc *v1.Service) { + svc.ObjectMeta.Annotations = map[string]string{gcecloud.ServiceAnnotationLoadBalancerType: string(gcecloud.LBTypeInternal)} + } + disable = func(svc *v1.Service) { + delete(svc.ObjectMeta.Annotations, gcecloud.ServiceAnnotationLoadBalancerType) + } + return +} + +// GetInstanceTags gets tags from GCE instance with given name. +func GetInstanceTags(cloudConfig framework.CloudConfig, instanceName string) *compute.Tags { + gceCloud := cloudConfig.Provider.(*Provider).gceCloud + res, err := gceCloud.ComputeServices().GA.Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone, + instanceName).Do() + if err != nil { + framework.Failf("Failed to get instance tags for %v: %v", instanceName, err) + } + return res.Tags +} + +// SetInstanceTags sets tags on GCE instance with given name. 
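+// It returns the instance's previous tag items so callers can restore
+// them when they are done.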
+func SetInstanceTags(cloudConfig framework.CloudConfig, instanceName, zone string, tags []string) []string { + gceCloud := cloudConfig.Provider.(*Provider).gceCloud + // Re-get instance everytime because we need the latest fingerprint for updating metadata + resTags := GetInstanceTags(cloudConfig, instanceName) + _, err := gceCloud.ComputeServices().GA.Instances.SetTags( + cloudConfig.ProjectID, zone, instanceName, + &compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do() + if err != nil { + framework.Failf("failed to set instance tags: %v", err) + } + framework.Logf("Sent request to set tags %v on instance: %v", tags, instanceName) + return resTags.Items +} + +// GetNodeTags gets k8s node tag from one of the nodes +func GetNodeTags(c clientset.Interface, cloudConfig framework.CloudConfig) []string { + nodes := framework.GetReadySchedulableNodesOrDie(c) + if len(nodes.Items) == 0 { + framework.Logf("GetNodeTags: Found 0 node.") + return []string{} + } + return GetInstanceTags(cloudConfig, nodes.Items[0].Name).Items +} + +// IsHTTPErrorCode returns true if the error is a google api +// error matching the corresponding HTTP error code. +func IsGoogleAPIHTTPErrorCode(err error, code int) bool { + apiErr, ok := err.(*googleapi.Error) + return ok && apiErr.Code == code +} + +func GetGCECloud() (*gcecloud.GCECloud, error) { + p, ok := framework.TestContext.CloudConfig.Provider.(*Provider) + if !ok { + return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCE provider: %#v", framework.TestContext.CloudConfig.Provider) + } + return p.gceCloud, nil +} + +func GetClusterID(c clientset.Interface) (string, error) { + cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{}) + if err != nil || cm == nil { + return "", fmt.Errorf("error getting cluster ID: %v", err) + } + clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster] + providerID, providerIDExists := cm.Data[gcecloud.UIDProvider] + if !clusterIDExists { + return "", fmt.Errorf("cluster ID not set") + } + if providerIDExists { + return providerID, nil + } + return clusterID, nil +} diff --git a/test/e2e/framework/providers/gce/ingress.go b/test/e2e/framework/providers/gce/ingress.go new file mode 100644 index 00000000000..b11598fb756 --- /dev/null +++ b/test/e2e/framework/providers/gce/ingress.go @@ -0,0 +1,817 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gce + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + "net/http" + "os/exec" + "strconv" + "strings" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + compute "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + utilexec "k8s.io/utils/exec" +) + +const ( + // Name of the config-map and key the ingress controller stores its uid in. 
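+	// These mirror gcecloud.UIDConfigMapName and gcecloud.UIDCluster, which
+	// GetClusterID in gce.go reads from the same config-map.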
+ uidConfigMap = "ingress-uid" + uidKey = "uid" + + // all cloud resources created by the ingress controller start with this + // prefix. + k8sPrefix = "k8s-" + + // clusterDelimiter is the delimiter used by the ingress controller + // to split uid from other naming/metadata. + clusterDelimiter = "--" + + // Cloud resources created by the ingress controller older than this + // are automatically purged to prevent running out of quota. + // TODO(37335): write soak tests and bump this up to a week. + maxAge = 48 * time.Hour + + // GCE only allows names < 64 characters, and the loadbalancer controller inserts + // a single character of padding. + nameLenLimit = 62 +) + +// GCEIngressController manages implementation details of Ingress on GCE/GKE. +type GCEIngressController struct { + Ns string + rcPath string + UID string + staticIPName string + rc *v1.ReplicationController + svc *v1.Service + Client clientset.Interface + Cloud framework.CloudConfig +} + +func (cont *GCEIngressController) CleanupGCEIngressController() error { + return cont.CleanupGCEIngressControllerWithTimeout(framework.LoadBalancerCleanupTimeout) +} + +// CleanupGCEIngressControllerWithTimeout calls the GCEIngressController.Cleanup(false) +// followed with deleting the static ip, and then a final GCEIngressController.Cleanup(true) +func (cont *GCEIngressController) CleanupGCEIngressControllerWithTimeout(timeout time.Duration) error { + pollErr := wait.Poll(5*time.Second, timeout, func() (bool, error) { + if err := cont.Cleanup(false); err != nil { + framework.Logf("Monitoring glbc's cleanup of gce resources:\n%v", err) + return false, nil + } + return true, nil + }) + + // Always try to cleanup even if pollErr == nil, because the cleanup + // routine also purges old leaked resources based on creation timestamp. + By("Performing final delete of any remaining resources") + if cleanupErr := cont.Cleanup(true); cleanupErr != nil { + By(fmt.Sprintf("WARNING: possibly leaked resources: %v\n", cleanupErr)) + } else { + By("No resources leaked.") + } + + // Static-IP allocated on behalf of the test, never deleted by the + // controller. Delete this IP only after the controller has had a chance + // to cleanup or it might interfere with the controller, causing it to + // throw out confusing events. + if ipErr := wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) { + if err := cont.deleteStaticIPs(); err != nil { + framework.Logf("Failed to delete static-ip: %v\n", err) + return false, nil + } + return true, nil + }); ipErr != nil { + // If this is a persistent error, the suite will fail when we run out + // of quota anyway. + By(fmt.Sprintf("WARNING: possibly leaked static IP: %v\n", ipErr)) + } + + // Logging that the GLBC failed to cleanup GCE resources on ingress deletion + // See kubernetes/ingress#431 + if pollErr != nil { + return fmt.Errorf("error: L7 controller failed to delete all cloud resources on time. 
%v", pollErr) + } + return nil +} + +func (cont *GCEIngressController) getL7AddonUID() (string, error) { + framework.Logf("Retrieving UID from config map: %v/%v", metav1.NamespaceSystem, uidConfigMap) + cm, err := cont.Client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{}) + if err != nil { + return "", err + } + if uid, ok := cm.Data[uidKey]; ok { + return uid, nil + } + return "", fmt.Errorf("Could not find cluster UID for L7 addon pod") +} + +func (cont *GCEIngressController) ListGlobalForwardingRules() []*compute.ForwardingRule { + gceCloud := cont.Cloud.Provider.(*Provider).gceCloud + fwdList := []*compute.ForwardingRule{} + l, err := gceCloud.ListGlobalForwardingRules() + Expect(err).NotTo(HaveOccurred()) + for _, fwd := range l { + if cont.isOwned(fwd.Name) { + fwdList = append(fwdList, fwd) + } + } + return fwdList +} + +func (cont *GCEIngressController) deleteForwardingRule(del bool) string { + msg := "" + fwList := []compute.ForwardingRule{} + for _, regex := range []string{fmt.Sprintf("%vfw-.*%v.*", k8sPrefix, clusterDelimiter), fmt.Sprintf("%vfws-.*%v.*", k8sPrefix, clusterDelimiter)} { + gcloudComputeResourceList("forwarding-rules", regex, cont.Cloud.ProjectID, &fwList) + if len(fwList) == 0 { + continue + } + for _, f := range fwList { + if !cont.canDelete(f.Name, f.CreationTimestamp, del) { + continue + } + if del { + GcloudComputeResourceDelete("forwarding-rules", f.Name, cont.Cloud.ProjectID, "--global") + } else { + msg += fmt.Sprintf("%v (forwarding rule)\n", f.Name) + } + } + } + return msg +} + +func (cont *GCEIngressController) GetGlobalAddress(ipName string) *compute.Address { + gceCloud := cont.Cloud.Provider.(*Provider).gceCloud + ip, err := gceCloud.GetGlobalAddress(ipName) + Expect(err).NotTo(HaveOccurred()) + return ip +} + +func (cont *GCEIngressController) deleteAddresses(del bool) string { + msg := "" + ipList := []compute.Address{} + regex := fmt.Sprintf("%vfw-.*%v.*", k8sPrefix, clusterDelimiter) + gcloudComputeResourceList("addresses", regex, cont.Cloud.ProjectID, &ipList) + if len(ipList) != 0 { + for _, ip := range ipList { + if !cont.canDelete(ip.Name, ip.CreationTimestamp, del) { + continue + } + if del { + GcloudComputeResourceDelete("addresses", ip.Name, cont.Cloud.ProjectID, "--global") + } else { + msg += fmt.Sprintf("%v (static-ip)\n", ip.Name) + } + } + } + return msg +} + +func (cont *GCEIngressController) ListTargetHttpProxies() []*compute.TargetHttpProxy { + gceCloud := cont.Cloud.Provider.(*Provider).gceCloud + tpList := []*compute.TargetHttpProxy{} + l, err := gceCloud.ListTargetHttpProxies() + Expect(err).NotTo(HaveOccurred()) + for _, tp := range l { + if cont.isOwned(tp.Name) { + tpList = append(tpList, tp) + } + } + return tpList +} + +func (cont *GCEIngressController) ListTargetHttpsProxies() []*compute.TargetHttpsProxy { + gceCloud := cont.Cloud.Provider.(*Provider).gceCloud + tpsList := []*compute.TargetHttpsProxy{} + l, err := gceCloud.ListTargetHttpsProxies() + Expect(err).NotTo(HaveOccurred()) + for _, tps := range l { + if cont.isOwned(tps.Name) { + tpsList = append(tpsList, tps) + } + } + return tpsList +} + +func (cont *GCEIngressController) deleteTargetProxy(del bool) string { + msg := "" + tpList := []compute.TargetHttpProxy{} + regex := fmt.Sprintf("%vtp-.*%v.*", k8sPrefix, clusterDelimiter) + gcloudComputeResourceList("target-http-proxies", regex, cont.Cloud.ProjectID, &tpList) + if len(tpList) != 0 { + for _, t := range tpList { + if !cont.canDelete(t.Name, t.CreationTimestamp, del) { + 
continue + } + if del { + GcloudComputeResourceDelete("target-http-proxies", t.Name, cont.Cloud.ProjectID) + } else { + msg += fmt.Sprintf("%v (target-http-proxy)\n", t.Name) + } + } + } + tpsList := []compute.TargetHttpsProxy{} + regex = fmt.Sprintf("%vtps-.*%v.*", k8sPrefix, clusterDelimiter) + gcloudComputeResourceList("target-https-proxies", regex, cont.Cloud.ProjectID, &tpsList) + if len(tpsList) != 0 { + for _, t := range tpsList { + if !cont.canDelete(t.Name, t.CreationTimestamp, del) { + continue + } + if del { + GcloudComputeResourceDelete("target-https-proxies", t.Name, cont.Cloud.ProjectID) + } else { + msg += fmt.Sprintf("%v (target-https-proxy)\n", t.Name) + } + } + } + return msg +} + +func (cont *GCEIngressController) ListUrlMaps() []*compute.UrlMap { + gceCloud := cont.Cloud.Provider.(*Provider).gceCloud + umList := []*compute.UrlMap{} + l, err := gceCloud.ListUrlMaps() + Expect(err).NotTo(HaveOccurred()) + for _, um := range l { + if cont.isOwned(um.Name) { + umList = append(umList, um) + } + } + return umList +} + +func (cont *GCEIngressController) deleteURLMap(del bool) (msg string) { + gceCloud := cont.Cloud.Provider.(*Provider).gceCloud + umList, err := gceCloud.ListUrlMaps() + if err != nil { + if cont.isHTTPErrorCode(err, http.StatusNotFound) { + return msg + } + return fmt.Sprintf("Failed to list url maps: %v", err) + } + if len(umList) == 0 { + return msg + } + for _, um := range umList { + if !cont.canDelete(um.Name, um.CreationTimestamp, del) { + continue + } + if del { + framework.Logf("Deleting url-map: %s", um.Name) + if err := gceCloud.DeleteUrlMap(um.Name); err != nil && + !cont.isHTTPErrorCode(err, http.StatusNotFound) { + msg += fmt.Sprintf("Failed to delete url map %v\n", um.Name) + } + } else { + msg += fmt.Sprintf("%v (url-map)\n", um.Name) + } + } + return msg +} + +func (cont *GCEIngressController) ListGlobalBackendServices() []*compute.BackendService { + gceCloud := cont.Cloud.Provider.(*Provider).gceCloud + beList := []*compute.BackendService{} + l, err := gceCloud.ListGlobalBackendServices() + Expect(err).NotTo(HaveOccurred()) + for _, be := range l { + if cont.isOwned(be.Name) { + beList = append(beList, be) + } + } + return beList +} + +func (cont *GCEIngressController) deleteBackendService(del bool) (msg string) { + gceCloud := cont.Cloud.Provider.(*Provider).gceCloud + beList, err := gceCloud.ListGlobalBackendServices() + if err != nil { + if cont.isHTTPErrorCode(err, http.StatusNotFound) { + return msg + } + return fmt.Sprintf("Failed to list backend services: %v", err) + } + if len(beList) == 0 { + framework.Logf("No backend services found") + return msg + } + for _, be := range beList { + if !cont.canDelete(be.Name, be.CreationTimestamp, del) { + continue + } + if del { + framework.Logf("Deleting backed-service: %s", be.Name) + if err := gceCloud.DeleteGlobalBackendService(be.Name); err != nil && + !cont.isHTTPErrorCode(err, http.StatusNotFound) { + msg += fmt.Sprintf("Failed to delete backend service %v: %v\n", be.Name, err) + } + } else { + msg += fmt.Sprintf("%v (backend-service)\n", be.Name) + } + } + return msg +} + +func (cont *GCEIngressController) deleteHTTPHealthCheck(del bool) (msg string) { + gceCloud := cont.Cloud.Provider.(*Provider).gceCloud + hcList, err := gceCloud.ListHttpHealthChecks() + if err != nil { + if cont.isHTTPErrorCode(err, http.StatusNotFound) { + return msg + } + return fmt.Sprintf("Failed to list HTTP health checks: %v", err) + } + if len(hcList) == 0 { + return msg + } + for _, hc := range hcList { + if 
!cont.canDelete(hc.Name, hc.CreationTimestamp, del) { + continue + } + if del { + framework.Logf("Deleting http-health-check: %s", hc.Name) + if err := gceCloud.DeleteHttpHealthCheck(hc.Name); err != nil && + !cont.isHTTPErrorCode(err, http.StatusNotFound) { + msg += fmt.Sprintf("Failed to delete HTTP health check %v\n", hc.Name) + } + } else { + msg += fmt.Sprintf("%v (http-health-check)\n", hc.Name) + } + } + return msg +} + +func (cont *GCEIngressController) ListSslCertificates() []*compute.SslCertificate { + gceCloud := cont.Cloud.Provider.(*Provider).gceCloud + sslList := []*compute.SslCertificate{} + l, err := gceCloud.ListSslCertificates() + Expect(err).NotTo(HaveOccurred()) + for _, ssl := range l { + if cont.isOwned(ssl.Name) { + sslList = append(sslList, ssl) + } + } + return sslList +} + +func (cont *GCEIngressController) deleteSSLCertificate(del bool) (msg string) { + gceCloud := cont.Cloud.Provider.(*Provider).gceCloud + sslList, err := gceCloud.ListSslCertificates() + if err != nil { + if cont.isHTTPErrorCode(err, http.StatusNotFound) { + return msg + } + return fmt.Sprintf("Failed to list ssl certificates: %v", err) + } + if len(sslList) != 0 { + for _, s := range sslList { + if !cont.canDelete(s.Name, s.CreationTimestamp, del) { + continue + } + if del { + framework.Logf("Deleting ssl-certificate: %s", s.Name) + if err := gceCloud.DeleteSslCertificate(s.Name); err != nil && + !cont.isHTTPErrorCode(err, http.StatusNotFound) { + msg += fmt.Sprintf("Failed to delete ssl certificates: %v\n", s.Name) + } + } else { + msg += fmt.Sprintf("%v (ssl-certificate)\n", s.Name) + } + } + } + return msg +} + +func (cont *GCEIngressController) ListInstanceGroups() []*compute.InstanceGroup { + gceCloud := cont.Cloud.Provider.(*Provider).gceCloud + igList := []*compute.InstanceGroup{} + l, err := gceCloud.ListInstanceGroups(cont.Cloud.Zone) + Expect(err).NotTo(HaveOccurred()) + for _, ig := range l { + if cont.isOwned(ig.Name) { + igList = append(igList, ig) + } + } + return igList +} + +func (cont *GCEIngressController) deleteInstanceGroup(del bool) (msg string) { + gceCloud := cont.Cloud.Provider.(*Provider).gceCloud + // TODO: E2E cloudprovider has only 1 zone, but the cluster can have many. + // We need to poll on all IGs across all zones. + igList, err := gceCloud.ListInstanceGroups(cont.Cloud.Zone) + if err != nil { + if cont.isHTTPErrorCode(err, http.StatusNotFound) { + return msg + } + return fmt.Sprintf("Failed to list instance groups: %v", err) + } + if len(igList) == 0 { + return msg + } + for _, ig := range igList { + if !cont.canDelete(ig.Name, ig.CreationTimestamp, del) { + continue + } + if del { + framework.Logf("Deleting instance-group: %s", ig.Name) + if err := gceCloud.DeleteInstanceGroup(ig.Name, cont.Cloud.Zone); err != nil && + !cont.isHTTPErrorCode(err, http.StatusNotFound) { + msg += fmt.Sprintf("Failed to delete instance group %v\n", ig.Name) + } + } else { + msg += fmt.Sprintf("%v (instance-group)\n", ig.Name) + } + } + return msg +} + +func (cont *GCEIngressController) deleteNetworkEndpointGroup(del bool) (msg string) { + gceCloud := cont.Cloud.Provider.(*Provider).gceCloud + // TODO: E2E cloudprovider has only 1 zone, but the cluster can have many. + // We need to poll on all NEGs across all zones. + negList, err := gceCloud.ListNetworkEndpointGroup(cont.Cloud.Zone) + if err != nil { + if cont.isHTTPErrorCode(err, http.StatusNotFound) { + return msg + } + // Do not return error as NEG is still alpha. 
+	framework.Logf("Failed to list network endpoint group: %v", err)
+		return msg
+	}
+	if len(negList) == 0 {
+		return msg
+	}
+	for _, neg := range negList {
+		if !cont.canDeleteNEG(neg.Name, neg.CreationTimestamp, del) {
+			continue
+		}
+		if del {
+			framework.Logf("Deleting network-endpoint-group: %s", neg.Name)
+			if err := gceCloud.DeleteNetworkEndpointGroup(neg.Name, cont.Cloud.Zone); err != nil &&
+				!cont.isHTTPErrorCode(err, http.StatusNotFound) {
+				msg += fmt.Sprintf("Failed to delete network endpoint group %v\n", neg.Name)
+			}
+		} else {
+			msg += fmt.Sprintf("%v (network-endpoint-group)\n", neg.Name)
+		}
+	}
+	return msg
+}
+
+// canDelete returns true if either the name ends in a suffix matching this
+// controller's UID, or the creationTimestamp exceeds the maxAge and del is set
+// to true. Always returns false if the name doesn't match what we expect for
+// Ingress cloud resources.
+func (cont *GCEIngressController) canDelete(resourceName, creationTimestamp string, delOldResources bool) bool {
+	// ignore everything not created by an ingress controller.
+	splitName := strings.Split(resourceName, clusterDelimiter)
+	if !strings.HasPrefix(resourceName, k8sPrefix) || len(splitName) != 2 {
+		return false
+	}
+
+	// Resources created by the GLBC have a "0" appended to the end if truncation
+	// occurred. Removing the zero allows the following match.
+	truncatedClusterUID := splitName[1]
+	if len(truncatedClusterUID) >= 1 && strings.HasSuffix(truncatedClusterUID, "0") {
+		truncatedClusterUID = truncatedClusterUID[:len(truncatedClusterUID)-1]
+	}
+
+	// always delete things that are created by the current ingress controller.
+	// Because of resource name truncation, this looks for a common prefix
+	if strings.HasPrefix(cont.UID, truncatedClusterUID) {
+		return true
+	}
+	if !delOldResources {
+		return false
+	}
+	return canDeleteWithTimestamp(resourceName, creationTimestamp)
+}
+
+// isOwned returns true if the resourceName ends in a suffix matching this
+// controller UID.
+func (cont *GCEIngressController) isOwned(resourceName string) bool {
+	return cont.canDelete(resourceName, "", false)
+}
+
+// canDeleteNEG returns true if either the name contains this controller's UID,
+// or the creationTimestamp exceeds the maxAge and del is set to true.
+func (cont *GCEIngressController) canDeleteNEG(resourceName, creationTimestamp string, delOldResources bool) bool {
+	if !strings.HasPrefix(resourceName, "k8s") {
+		return false
+	}
+
+	if strings.Contains(resourceName, cont.UID) {
+		return true
+	}
+
+	if !delOldResources {
+		return false
+	}
+
+	return canDeleteWithTimestamp(resourceName, creationTimestamp)
+}
+
+func canDeleteWithTimestamp(resourceName, creationTimestamp string) bool {
+	createdTime, err := time.Parse(time.RFC3339, creationTimestamp)
+	if err != nil {
+		framework.Logf("WARNING: Failed to parse creation timestamp %v for %v: %v", creationTimestamp, resourceName, err)
+		return false
+	}
+	if time.Since(createdTime) > maxAge {
+		framework.Logf("%v created on %v IS too old", resourceName, creationTimestamp)
+		return true
+	}
+	return false
+}
+
+// GetFirewallRuleName returns the name of the firewall used for the GCEIngressController.
+func (cont *GCEIngressController) GetFirewallRuleName() string {
+	return fmt.Sprintf("%vfw-l7%v%v", k8sPrefix, clusterDelimiter, cont.UID)
+}
+
+// GetFirewallRule returns the firewall used by the GCEIngressController.
+// Causes a fatal error in case of an error.
+// TODO: Rename this to GetFirewallRuleOrDie and similarly rename all other +// methods here to be consistent with rest of the code in this repo. +func (cont *GCEIngressController) GetFirewallRule() *compute.Firewall { + fw, err := cont.GetFirewallRuleOrError() + Expect(err).NotTo(HaveOccurred()) + return fw +} + +// GetFirewallRule returns the firewall used by the GCEIngressController. +// Returns an error if that fails. +// TODO: Rename this to GetFirewallRule when the above method with that name is renamed. +func (cont *GCEIngressController) GetFirewallRuleOrError() (*compute.Firewall, error) { + gceCloud := cont.Cloud.Provider.(*Provider).gceCloud + fwName := cont.GetFirewallRuleName() + return gceCloud.GetFirewall(fwName) +} + +func (cont *GCEIngressController) deleteFirewallRule(del bool) (msg string) { + fwList := []compute.Firewall{} + regex := fmt.Sprintf("%vfw-l7%v.*", k8sPrefix, clusterDelimiter) + gcloudComputeResourceList("firewall-rules", regex, cont.Cloud.ProjectID, &fwList) + if len(fwList) != 0 { + for _, f := range fwList { + if !cont.canDelete(f.Name, f.CreationTimestamp, del) { + continue + } + if del { + GcloudComputeResourceDelete("firewall-rules", f.Name, cont.Cloud.ProjectID) + } else { + msg += fmt.Sprintf("%v (firewall rule)\n", f.Name) + } + } + } + return msg +} + +func (cont *GCEIngressController) isHTTPErrorCode(err error, code int) bool { + apiErr, ok := err.(*googleapi.Error) + return ok && apiErr.Code == code +} + +// BackendServiceUsingNEG returns true only if all global backend service with matching nodeports pointing to NEG as backend +func (cont *GCEIngressController) BackendServiceUsingNEG(svcPorts map[string]v1.ServicePort) (bool, error) { + return cont.backendMode(svcPorts, "networkEndpointGroups") +} + +// BackendServiceUsingIG returns true only if all global backend service with matching svcPorts pointing to IG as backend +func (cont *GCEIngressController) BackendServiceUsingIG(svcPorts map[string]v1.ServicePort) (bool, error) { + return cont.backendMode(svcPorts, "instanceGroups") +} + +func (cont *GCEIngressController) backendMode(svcPorts map[string]v1.ServicePort, keyword string) (bool, error) { + gceCloud := cont.Cloud.Provider.(*Provider).gceCloud + beList, err := gceCloud.ListGlobalBackendServices() + if err != nil { + return false, fmt.Errorf("failed to list backend services: %v", err) + } + + hcList, err := gceCloud.ListHealthChecks() + if err != nil { + return false, fmt.Errorf("failed to list health checks: %v", err) + } + + uid := cont.UID + if len(uid) > 8 { + uid = uid[:8] + } + + matchingBackendService := 0 + for svcName, sp := range svcPorts { + match := false + bsMatch := &compute.BackendService{} + // Non-NEG BackendServices are named with the Nodeport in the name. + // NEG BackendServices' names contain the a sha256 hash of a string. 
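+		// For example, with uid "f0ed1abc", namespace "e2e-tests-ingress",
+		// service "web" and port 80, the hashed string below would be
+		// "f0ed1abc;e2e-tests-ingress;web;80" (values are hypothetical).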
+	negString := strings.Join([]string{uid, cont.Ns, svcName, fmt.Sprintf("%v", sp.Port)}, ";")
+		negHash := fmt.Sprintf("%x", sha256.Sum256([]byte(negString)))[:8]
+		for _, bs := range beList {
+			if strings.Contains(bs.Name, strconv.Itoa(int(sp.NodePort))) ||
+				strings.Contains(bs.Name, negHash) {
+				match = true
+				bsMatch = bs
+				matchingBackendService += 1
+				break
+			}
+		}
+
+		if match {
+			for _, be := range bsMatch.Backends {
+				if !strings.Contains(be.Group, keyword) {
+					return false, nil
+				}
+			}
+
+			// Check that the correct HealthCheck exists for the BackendService
+			hcMatch := false
+			for _, hc := range hcList {
+				if hc.Name == bsMatch.Name {
+					hcMatch = true
+					break
+				}
+			}
+
+			if !hcMatch {
+				return false, fmt.Errorf("missing healthcheck for backendservice: %v", bsMatch.Name)
+			}
+		}
+	}
+	return matchingBackendService == len(svcPorts), nil
+}
+
+// Cleanup cleans up cloud resources.
+// If del is false, it simply reports existing resources without deleting them.
+// If del is true, it deletes resources it finds acceptable (see canDelete func).
+func (cont *GCEIngressController) Cleanup(del bool) error {
+	// Ordering is important here because we cannot delete resources that other
+	// resources hold references to.
+	errMsg := cont.deleteForwardingRule(del)
+	// Static IPs are named after forwarding rules.
+	errMsg += cont.deleteAddresses(del)
+
+	errMsg += cont.deleteTargetProxy(del)
+	errMsg += cont.deleteURLMap(del)
+	errMsg += cont.deleteBackendService(del)
+	errMsg += cont.deleteHTTPHealthCheck(del)
+
+	errMsg += cont.deleteInstanceGroup(del)
+	errMsg += cont.deleteNetworkEndpointGroup(del)
+	errMsg += cont.deleteFirewallRule(del)
+	errMsg += cont.deleteSSLCertificate(del)
+
+	// TODO: Verify instance-groups, issue #16636. Gcloud mysteriously barfs when told
+	// to unmarshal instance groups into the current vendored gce-client's understanding
+	// of the struct.
+	if errMsg == "" {
+		return nil
+	}
+	return fmt.Errorf(errMsg)
+}
+
+// Init initializes the GCEIngressController with a UID
+func (cont *GCEIngressController) Init() error {
+	uid, err := cont.getL7AddonUID()
+	if err != nil {
+		return err
+	}
+	cont.UID = uid
+	// There's a name limit imposed by GCE. The controller will truncate.
+	testName := fmt.Sprintf("k8s-fw-foo-app-X-%v--%v", cont.Ns, cont.UID)
+	if len(testName) > nameLenLimit {
+		framework.Logf("WARNING: test name including cluster UID: %v is over the GCE limit of %v", testName, nameLenLimit)
+	} else {
+		framework.Logf("Detected cluster UID %v", cont.UID)
+	}
+	return nil
+}
+
+// CreateStaticIP allocates a random static ip with the given name. Returns a string
+// representation of the ip. Caller is expected to manage cleanup of the ip by
+// invoking deleteStaticIPs.
+func (cont *GCEIngressController) CreateStaticIP(name string) string {
+	gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
+	addr := &compute.Address{Name: name}
+	if err := gceCloud.ReserveGlobalAddress(addr); err != nil {
+		if delErr := gceCloud.DeleteGlobalAddress(name); delErr != nil {
+			if cont.isHTTPErrorCode(delErr, http.StatusNotFound) {
+				framework.Logf("Static ip with name %v was not allocated, nothing to delete", name)
+			} else {
+				framework.Logf("Failed to delete static ip %v: %v", name, delErr)
+			}
+		}
+		framework.Failf("Failed to allocate static ip %v: %v", name, err)
+	}
+
+	ip, err := gceCloud.GetGlobalAddress(name)
+	if err != nil {
+		framework.Failf("Failed to get newly created static ip %v: %v", name, err)
+	}
+
+	cont.staticIPName = ip.Name
+	framework.Logf("Reserved static ip %v: %v", cont.staticIPName, ip.Address)
+	return ip.Address
+}
+
+// deleteStaticIPs deletes all static IPs allocated through calls to
+// CreateStaticIP.
+func (cont *GCEIngressController) deleteStaticIPs() error {
+	if cont.staticIPName != "" {
+		if err := GcloudComputeResourceDelete("addresses", cont.staticIPName, cont.Cloud.ProjectID, "--global"); err == nil {
+			cont.staticIPName = ""
+		} else {
+			return err
+		}
+	} else {
+		e2eIPs := []compute.Address{}
+		gcloudComputeResourceList("addresses", "e2e-.*", cont.Cloud.ProjectID, &e2eIPs)
+		ips := []string{}
+		for _, ip := range e2eIPs {
+			ips = append(ips, ip.Name)
+		}
+		framework.Logf("None of the remaining %d static-ips were created by this e2e: %v", len(ips), strings.Join(ips, ", "))
+	}
+	return nil
+}
+
+// gcloudComputeResourceList unmarshals json output of gcloud into the given out interface.
+func gcloudComputeResourceList(resource, regex, project string, out interface{}) {
+	// gcloud prints a message to stderr if it has an available update
+	// so we only look at stdout.
+	command := []string{
+		"compute", resource, "list",
+		fmt.Sprintf("--filter='name ~ \"%q\"'", regex),
+		fmt.Sprintf("--project=%v", project),
+		"-q", "--format=json",
+	}
+	output, err := exec.Command("gcloud", command...).Output()
+	if err != nil {
+		errCode := -1
+		errMsg := ""
+		if exitErr, ok := err.(utilexec.ExitError); ok {
+			errCode = exitErr.ExitStatus()
+			errMsg = exitErr.Error()
+			if osExitErr, ok := err.(*exec.ExitError); ok {
+				errMsg = fmt.Sprintf("%v, stderr %v", errMsg, string(osExitErr.Stderr))
+			}
+		}
+		framework.Logf("Error running gcloud command 'gcloud %s': err: %v, output: %v, status: %d, msg: %v", strings.Join(command, " "), err, string(output), errCode, errMsg)
+	}
+	if err := json.Unmarshal([]byte(output), out); err != nil {
+		framework.Logf("Error unmarshalling gcloud output for %v: %v, output: %v", resource, err, string(output))
+	}
+}
+
+// GcloudComputeResourceDelete deletes the specified compute resource by name and project.
+func GcloudComputeResourceDelete(resource, name, project string, args ...string) error {
+	framework.Logf("Deleting %v: %v", resource, name)
+	argList := append([]string{"compute", resource, "delete", name, fmt.Sprintf("--project=%v", project), "-q"}, args...)
+	output, err := exec.Command("gcloud", argList...).CombinedOutput()
+	if err != nil {
+		framework.Logf("Error deleting %v, output: %v\nerror: %+v", resource, string(output), err)
+	}
+	return err
+}
+
+// GcloudComputeResourceCreate creates a compute resource with a name and arguments.
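+//
+// Hypothetical example, reserving a global static IP:
+//
+//	GcloudComputeResourceCreate("addresses", "my-test-ip", project, "--global")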
+func GcloudComputeResourceCreate(resource, name, project string, args ...string) error { + framework.Logf("Creating %v in project %v: %v", resource, project, name) + argsList := append([]string{"compute", resource, "create", name, fmt.Sprintf("--project=%v", project)}, args...) + framework.Logf("Running command: gcloud %+v", strings.Join(argsList, " ")) + output, err := exec.Command("gcloud", argsList...).CombinedOutput() + if err != nil { + framework.Logf("Error creating %v, output: %v\nerror: %+v", resource, string(output), err) + } + return err +} diff --git a/test/e2e/framework/providers/kubemark/kubemark.go b/test/e2e/framework/providers/kubemark/kubemark.go new file mode 100644 index 00000000000..6b9552b3322 --- /dev/null +++ b/test/e2e/framework/providers/kubemark/kubemark.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubemark + +import ( + "flag" + "fmt" + + "k8s.io/client-go/informers" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubernetes/pkg/kubemark" + "k8s.io/kubernetes/test/e2e/framework" + + . "github.com/onsi/gomega" +) + +var ( + kubemarkExternalKubeConfig = flag.String(fmt.Sprintf("%s-%s", "kubemark-external", clientcmd.RecommendedConfigPathFlag), "", "Path to kubeconfig containing embedded authinfo for external cluster.") +) + +func init() { + framework.RegisterProvider("kubemark", NewProvider) +} + +func NewProvider() (framework.ProviderInterface, error) { + // Actual initialization happens when the e2e framework gets constructed. 
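+	// (FrameworkBeforeEach below builds the kubemark controller once the
+	// framework's client sets are available.)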
+	return &Provider{}, nil
+}
+
+type Provider struct {
+	framework.NullProvider
+	controller   *kubemark.KubemarkController
+	closeChannel chan struct{}
+}
+
+func (p *Provider) ResizeGroup(group string, size int32) error {
+	return p.controller.SetNodeGroupSize(group, int(size))
+}
+
+func (p *Provider) GetGroupNodes(group string) ([]string, error) {
+	return p.controller.GetNodeNamesForNodeGroup(group)
+}
+
+func (p *Provider) FrameworkBeforeEach(f *framework.Framework) {
+	if *kubemarkExternalKubeConfig != "" && p.controller == nil {
+		externalConfig, err := clientcmd.BuildConfigFromFlags("", *kubemarkExternalKubeConfig)
+		// Check the error before touching the config: BuildConfigFromFlags
+		// returns a nil config on failure.
+		Expect(err).NotTo(HaveOccurred())
+		externalConfig.QPS = f.Options.ClientQPS
+		externalConfig.Burst = f.Options.ClientBurst
+		externalClient, err := clientset.NewForConfig(externalConfig)
+		Expect(err).NotTo(HaveOccurred())
+		f.KubemarkExternalClusterClientSet = externalClient
+		p.closeChannel = make(chan struct{})
+		externalInformerFactory := informers.NewSharedInformerFactory(externalClient, 0)
+		kubemarkInformerFactory := informers.NewSharedInformerFactory(f.ClientSet, 0)
+		kubemarkNodeInformer := kubemarkInformerFactory.Core().V1().Nodes()
+		go kubemarkNodeInformer.Informer().Run(p.closeChannel)
+		p.controller, err = kubemark.NewKubemarkController(externalClient, externalInformerFactory, f.ClientSet, kubemarkNodeInformer)
+		Expect(err).NotTo(HaveOccurred())
+		externalInformerFactory.Start(p.closeChannel)
+		Expect(p.controller.WaitForCacheSync(p.closeChannel)).To(BeTrue())
+		go p.controller.Run(p.closeChannel)
+	}
+}
+
+func (p *Provider) FrameworkAfterEach(f *framework.Framework) {
+	if p.closeChannel != nil {
+		close(p.closeChannel)
+		p.controller = nil
+		p.closeChannel = nil
+	}
+}
+
+func (p *Provider) GroupSize(group string) (int, error) {
+	return p.controller.GetNodeGroupSize(group)
+}
diff --git a/test/e2e/framework/pv_util.go b/test/e2e/framework/pv_util.go
index be9a157d933..53483fd193b 100644
--- a/test/e2e/framework/pv_util.go
+++ b/test/e2e/framework/pv_util.go
@@ -18,16 +18,9 @@ package framework
 
 import (
 	"fmt"
-	"strings"
 	"time"
 
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/session"
-	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/golang/glog"
 	. "github.com/onsi/ginkgo"
-	"google.golang.org/api/googleapi"
 	"k8s.io/api/core/v1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -36,8 +29,6 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
-	awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
-	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 	"k8s.io/kubernetes/pkg/volume/util"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -557,19 +548,6 @@ func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, pvc
 	return // note: named return value
 }
 
-// Sanity check for GCE testing. Verify the persistent disk attached to the node.
-func VerifyGCEDiskAttached(diskName string, nodeName types.NodeName) (bool, error) {
-	gceCloud, err := GetGCECloud()
-	if err != nil {
-		return false, fmt.Errorf("GetGCECloud error: %v", err)
-	}
-	isAttached, err := gceCloud.DiskIsAttached(diskName, nodeName)
-	if err != nil {
-		return false, fmt.Errorf("cannot verify if GCE disk is attached: %v", err)
-	}
-	return isAttached, nil
-}
-
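Taken together, the kubemark provider shows the whole recipe a provider package follows: define its own flags if needed, self-register in init(), and override only the hooks it cares about. A hedged skeleton of a brand-new provider is sketched below; the name "myprovider" and its import path are invented for illustration, while RegisterProvider, ProviderInterface, and NullProvider are the framework hooks used above:

```go
package myprovider

import "k8s.io/kubernetes/test/e2e/framework"

func init() {
	// Self-registration: a test suite opts in by importing this package for
	// side effects, e.g. _ "k8s.io/kubernetes/test/e2e/framework/providers/myprovider",
	// which makes --provider=myprovider resolvable.
	framework.RegisterProvider("myprovider", newProvider)
}

func newProvider() (framework.ProviderInterface, error) {
	return &provider{}, nil
}

// provider embeds NullProvider to inherit no-op defaults, so only the hooks
// this provider actually needs require explicit implementations.
type provider struct {
	framework.NullProvider
}
```

 // Return a pvckey struct.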
func makePvcKey(ns, name string) types.NamespacedName { return types.NamespacedName{Namespace: ns, Name: name} @@ -690,131 +668,15 @@ func DeletePDWithRetry(diskName string) error { return fmt.Errorf("unable to delete PD %q: %v", diskName, err) } -func newAWSClient(zone string) *ec2.EC2 { - var cfg *aws.Config - - if zone == "" { - zone = TestContext.CloudConfig.Zone - } - if zone == "" { - glog.Warning("No AWS zone configured!") - cfg = nil - } else { - region := zone[:len(zone)-1] - cfg = &aws.Config{Region: aws.String(region)} - } - return ec2.New(session.New(), cfg) -} - func createPD(zone string) (string, error) { if zone == "" { zone = TestContext.CloudConfig.Zone } - - if TestContext.Provider == "gce" || TestContext.Provider == "gke" { - pdName := fmt.Sprintf("%s-%s", TestContext.Prefix, string(uuid.NewUUID())) - - gceCloud, err := GetGCECloud() - if err != nil { - return "", err - } - - if zone == "" && TestContext.CloudConfig.MultiZone { - zones, err := gceCloud.GetAllZonesFromCloudProvider() - if err != nil { - return "", err - } - zone, _ = zones.PopAny() - } - - tags := map[string]string{} - err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeStandard, zone, 2 /* sizeGb */, tags) - if err != nil { - return "", err - } - return pdName, nil - } else if TestContext.Provider == "aws" { - client := newAWSClient(zone) - request := &ec2.CreateVolumeInput{} - request.AvailabilityZone = aws.String(zone) - request.Size = aws.Int64(10) - request.VolumeType = aws.String(awscloud.DefaultVolumeType) - response, err := client.CreateVolume(request) - if err != nil { - return "", err - } - - az := aws.StringValue(response.AvailabilityZone) - awsID := aws.StringValue(response.VolumeId) - - volumeName := "aws://" + az + "/" + awsID - return volumeName, nil - } else if TestContext.Provider == "azure" { - pdName := fmt.Sprintf("%s-%s", TestContext.Prefix, string(uuid.NewUUID())) - azureCloud, err := GetAzureCloud() - - if err != nil { - return "", err - } - - _, diskURI, _, err := azureCloud.CreateVolume(pdName, "" /* account */, "" /* sku */, "" /* location */, 1 /* sizeGb */) - if err != nil { - return "", err - } - return diskURI, nil - } else { - return "", fmt.Errorf("provider does not support volume creation") - } + return TestContext.CloudConfig.Provider.CreatePD(zone) } func deletePD(pdName string) error { - if TestContext.Provider == "gce" || TestContext.Provider == "gke" { - gceCloud, err := GetGCECloud() - if err != nil { - return err - } - - err = gceCloud.DeleteDisk(pdName) - - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && len(gerr.Errors) > 0 && gerr.Errors[0].Reason == "notFound" { - // PD already exists, ignore error. 
- return nil - } - - Logf("error deleting PD %q: %v", pdName, err) - } - return err - } else if TestContext.Provider == "aws" { - client := newAWSClient("") - - tokens := strings.Split(pdName, "/") - awsVolumeID := tokens[len(tokens)-1] - - request := &ec2.DeleteVolumeInput{VolumeId: aws.String(awsVolumeID)} - _, err := client.DeleteVolume(request) - if err != nil { - if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" { - Logf("volume deletion implicitly succeeded because volume %q does not exist.", pdName) - } else { - return fmt.Errorf("error deleting EBS volumes: %v", err) - } - } - return nil - } else if TestContext.Provider == "azure" { - azureCloud, err := GetAzureCloud() - if err != nil { - return err - } - err = azureCloud.DeleteVolume(pdName) - if err != nil { - Logf("failed to delete Azure volume %q: %v", pdName, err) - return err - } - return nil - } else { - return fmt.Errorf("provider does not support volume deletion") - } + return TestContext.CloudConfig.Provider.DeletePD(pdName) } // Returns a pod definition based on the namespace. The pod references the PVC's @@ -1091,33 +953,9 @@ func CreatePVSource(zone string) (*v1.PersistentVolumeSource, error) { if err != nil { return nil, err } - - if TestContext.Provider == "gce" || TestContext.Provider == "gke" { - return &v1.PersistentVolumeSource{ - GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ - PDName: diskName, - FSType: "ext3", - ReadOnly: false, - }, - }, nil - } else if TestContext.Provider == "aws" { - return &v1.PersistentVolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ - VolumeID: diskName, - FSType: "ext3", - }, - }, nil - } else { - return nil, fmt.Errorf("Provider not supported") - } + return TestContext.CloudConfig.Provider.CreatePVSource(zone, diskName) } func DeletePVSource(pvSource *v1.PersistentVolumeSource) error { - if TestContext.Provider == "gce" || TestContext.Provider == "gke" { - return DeletePDWithRetry(pvSource.GCEPersistentDisk.PDName) - } else if TestContext.Provider == "aws" { - return DeletePDWithRetry(pvSource.AWSElasticBlockStore.VolumeID) - } else { - return fmt.Errorf("Provider not supported") - } + return TestContext.CloudConfig.Provider.DeletePVSource(pvSource) } diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index 94c7b4871eb..f4f59620e67 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -40,8 +40,6 @@ import ( "k8s.io/client-go/util/retry" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - azurecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" - gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -1374,23 +1372,7 @@ func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceI } func CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) { - if TestContext.Provider == "gce" || TestContext.Provider == "gke" { - CleanupServiceGCEResources(c, loadBalancerName, region, zone) - } - - // TODO: we need to add this function with other cloud providers, if there is a need. 
-} - -func CleanupServiceGCEResources(c clientset.Interface, loadBalancerName, region, zone string) { - if pollErr := wait.Poll(5*time.Second, LoadBalancerCleanupTimeout, func() (bool, error) { - if err := CleanupGCEResources(c, loadBalancerName, region, zone); err != nil { - Logf("Still waiting for glbc to cleanup: %v", err) - return false, nil - } - return true, nil - }); pollErr != nil { - Failf("Failed to cleanup service GCE resources.") - } + TestContext.CloudConfig.Provider.CleanupServiceResources(c, loadBalancerName, region, zone) } func DescribeSvc(ns string) { @@ -1424,29 +1406,9 @@ func CreateServiceSpec(serviceName, externalName string, isHeadless bool, select } // EnableAndDisableInternalLB returns two functions for enabling and disabling the internal load balancer -// setting for the supported cloud providers: GCE/GKE and Azure +// setting for the supported cloud providers (currently GCE/GKE and Azure) and empty functions for others. func EnableAndDisableInternalLB() (enable func(svc *v1.Service), disable func(svc *v1.Service)) { - enable = func(svc *v1.Service) {} - disable = func(svc *v1.Service) {} - - switch TestContext.Provider { - case "gce", "gke": - enable = func(svc *v1.Service) { - svc.ObjectMeta.Annotations = map[string]string{gcecloud.ServiceAnnotationLoadBalancerType: string(gcecloud.LBTypeInternal)} - } - disable = func(svc *v1.Service) { - delete(svc.ObjectMeta.Annotations, gcecloud.ServiceAnnotationLoadBalancerType) - } - case "azure": - enable = func(svc *v1.Service) { - svc.ObjectMeta.Annotations = map[string]string{azurecloud.ServiceAnnotationLoadBalancerInternal: "true"} - } - disable = func(svc *v1.Service) { - svc.ObjectMeta.Annotations = map[string]string{azurecloud.ServiceAnnotationLoadBalancerInternal: "false"} - } - } - - return + return TestContext.CloudConfig.Provider.EnableAndDisableInternalLB() } func GetServiceLoadBalancerCreationTimeout(cs clientset.Interface) time.Duration { diff --git a/test/e2e/framework/size.go b/test/e2e/framework/size.go index b2eaffc1475..e2e0624ab32 100644 --- a/test/e2e/framework/size.go +++ b/test/e2e/framework/size.go @@ -18,14 +18,7 @@ package framework import ( "fmt" - "os/exec" - "regexp" - "strings" "time" - - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/autoscaling" - awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" ) const ( @@ -38,88 +31,15 @@ func ResizeGroup(group string, size int32) error { CoreDump(TestContext.ReportDir) defer CoreDump(TestContext.ReportDir) } - if TestContext.Provider == "gce" || TestContext.Provider == "gke" { - // TODO: make this hit the compute API directly instead of shelling out to gcloud. 
- // TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic - zone, err := getGCEZoneForGroup(group) - if err != nil { - return err - } - output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "resize", - group, fmt.Sprintf("--size=%v", size), - "--project="+TestContext.CloudConfig.ProjectID, "--zone="+zone).CombinedOutput() - if err != nil { - return fmt.Errorf("Failed to resize node instance group %s: %s", group, output) - } - return nil - } else if TestContext.Provider == "aws" { - client := autoscaling.New(session.New()) - return awscloud.ResizeInstanceGroup(client, group, int(size)) - } else if TestContext.Provider == "kubemark" { - return TestContext.CloudConfig.KubemarkController.SetNodeGroupSize(group, int(size)) - } else { - return fmt.Errorf("Provider does not support InstanceGroups") - } + return TestContext.CloudConfig.Provider.ResizeGroup(group, size) } func GetGroupNodes(group string) ([]string, error) { - if TestContext.Provider == "gce" || TestContext.Provider == "gke" { - // TODO: make this hit the compute API directly instead of shelling out to gcloud. - // TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic - zone, err := getGCEZoneForGroup(group) - if err != nil { - return nil, err - } - output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", - "list-instances", group, "--project="+TestContext.CloudConfig.ProjectID, - "--zone="+zone).CombinedOutput() - if err != nil { - return nil, fmt.Errorf("Failed to get nodes in instance group %s: %s", group, output) - } - re := regexp.MustCompile(".*RUNNING") - lines := re.FindAllString(string(output), -1) - for i, line := range lines { - lines[i] = line[:strings.Index(line, " ")] - } - return lines, nil - } else if TestContext.Provider == "kubemark" { - return TestContext.CloudConfig.KubemarkController.GetNodeNamesForNodeGroup(group) - } else { - return nil, fmt.Errorf("provider does not support InstanceGroups") - } + return TestContext.CloudConfig.Provider.GetGroupNodes(group) } func GroupSize(group string) (int, error) { - if TestContext.Provider == "gce" || TestContext.Provider == "gke" { - // TODO: make this hit the compute API directly instead of shelling out to gcloud. 
- // TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic - zone, err := getGCEZoneForGroup(group) - if err != nil { - return -1, err - } - output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", - "list-instances", group, "--project="+TestContext.CloudConfig.ProjectID, - "--zone="+zone).CombinedOutput() - if err != nil { - return -1, fmt.Errorf("Failed to get group size for group %s: %s", group, output) - } - re := regexp.MustCompile("RUNNING") - return len(re.FindAllString(string(output), -1)), nil - } else if TestContext.Provider == "aws" { - client := autoscaling.New(session.New()) - instanceGroup, err := awscloud.DescribeInstanceGroup(client, group) - if err != nil { - return -1, fmt.Errorf("error describing instance group: %v", err) - } - if instanceGroup == nil { - return -1, fmt.Errorf("instance group not found: %s", group) - } - return instanceGroup.CurrentSize() - } else if TestContext.Provider == "kubemark" { - return TestContext.CloudConfig.KubemarkController.GetNodeGroupSize(group) - } else { - return -1, fmt.Errorf("provider does not support InstanceGroups") - } + return TestContext.CloudConfig.Provider.GroupSize(group) } func WaitForGroupSize(group string, size int32) error { @@ -139,16 +59,3 @@ func WaitForGroupSize(group string, size int32) error { } return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size) } - -func getGCEZoneForGroup(group string) (string, error) { - zone := TestContext.CloudConfig.Zone - if TestContext.CloudConfig.MultiZone { - output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "list", - "--project="+TestContext.CloudConfig.ProjectID, "--format=value(zone)", "--filter=name="+group).CombinedOutput() - if err != nil { - return "", fmt.Errorf("Failed to get zone for node group %s: %s", group, output) - } - zone = strings.TrimSpace(string(output)) - } - return zone, nil -} diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go index fdedbb7e713..dc3bd24640c 100644 --- a/test/e2e/framework/test_context.go +++ b/test/e2e/framework/test_context.go @@ -29,9 +29,7 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - cloudprovider "k8s.io/cloud-provider" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" - "k8s.io/kubernetes/pkg/kubemark" ) const defaultHost = "http://127.0.0.1:8080" @@ -65,13 +63,12 @@ const defaultHost = "http://127.0.0.1:8080" // Test suite authors can use framework/viper to make all command line // parameters also configurable via a configuration file. type TestContextType struct { - KubeConfig string - KubemarkExternalKubeConfig string - KubeContext string - KubeAPIContentType string - KubeVolumeDir string - CertDir string - Host string + KubeConfig string + KubeContext string + KubeAPIContentType string + KubeVolumeDir string + CertDir string + Host string // TODO: Deprecating this over time... instead just use gobindata_util.go , see #23987. 
 	RepoRoot                string
 	DockershimCheckpointDir string
@@ -187,8 +184,7 @@ type CloudConfig struct {
 	NodeTag           string
 	MasterTag         string
 
-	Provider           cloudprovider.Interface
-	KubemarkController *kubemark.KubemarkController
+	Provider ProviderInterface
 }
 
 var TestContext TestContextType
@@ -236,7 +232,6 @@ func RegisterCommonFlags() {
 func RegisterClusterFlags() {
 	flag.BoolVar(&TestContext.VerifyServiceAccount, "e2e-verify-service-account", true, "If true tests will verify the service account before running.")
 	flag.StringVar(&TestContext.KubeConfig, clientcmd.RecommendedConfigPathFlag, os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to kubeconfig containing embedded authinfo.")
-	flag.StringVar(&TestContext.KubemarkExternalKubeConfig, fmt.Sprintf("%s-%s", "kubemark-external", clientcmd.RecommendedConfigPathFlag), "", "Path to kubeconfig containing embedded authinfo for external cluster.")
 	flag.StringVar(&TestContext.KubeContext, clientcmd.FlagContext, "", "kubeconfig context to use/override. If unset, will use value from 'current-context'")
 	flag.StringVar(&TestContext.KubeAPIContentType, "kube-api-content-type", "application/vnd.kubernetes.protobuf", "ContentType used to communicate with apiserver")
@@ -367,4 +362,11 @@ func AfterReadingAllFlags(t *TestContextType) {
 	if t.AllowedNotReadyNodes == 0 {
 		t.AllowedNotReadyNodes = t.CloudConfig.NumNodes / 100
 	}
+
+	// Make sure that all test runs have a valid TestContext.CloudConfig.Provider.
+	var err error
+	TestContext.CloudConfig.Provider, err = SetupProviderConfig(TestContext.Provider)
+	if err != nil {
+		Failf("Failed to setup provider config: %v", err)
+	}
 }
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index b0ddff8de8c..3df05718782 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -44,7 +44,6 @@ import (
 	"github.com/golang/glog"
 	"golang.org/x/crypto/ssh"
 	"golang.org/x/net/websocket"
-	"google.golang.org/api/googleapi"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -84,8 +83,6 @@ import (
 	extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/client/conditions"
-	"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
-	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 	"k8s.io/kubernetes/pkg/controller"
 	nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
 	"k8s.io/kubernetes/pkg/controller/service"
@@ -4443,48 +4440,10 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName
 	return string(logs), err
 }
 
-func GetGCECloud() (*gcecloud.GCECloud, error) {
-	gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
-	if !ok {
-		return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
-	}
-	return gceCloud, nil
-}
-
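The AfterReadingAllFlags hunk above is where the new indirection becomes load-bearing: every test run now resolves --provider through SetupProviderConfig. The registry behind RegisterProvider and SetupProviderConfig lives in the new provider.go, which this excerpt does not show; the sketch below is a plausible minimal shape of those two calls, with all unexported details (map and mutex names, exact error text) assumed rather than taken from the patch:

```go
package framework

import (
	"fmt"
	"sync"
)

// Factory creates the ProviderInterface implementation for a --provider name.
type Factory func() (ProviderInterface, error)

var (
	providersMutex sync.Mutex
	providers      = map[string]Factory{}
)

// RegisterProvider is called from the init() function of each provider package.
func RegisterProvider(name string, factory Factory) {
	providersMutex.Lock()
	defer providersMutex.Unlock()
	if _, ok := providers[name]; ok {
		panic(fmt.Sprintf("provider %q is registered more than once", name))
	}
	providers[name] = factory
}

// SetupProviderConfig instantiates the provider registered under name. An
// unknown name means the test suite never imported that provider's package.
func SetupProviderConfig(name string) (ProviderInterface, error) {
	providersMutex.Lock()
	defer providersMutex.Unlock()
	factory, ok := providers[name]
	if !ok {
		return nil, fmt.Errorf("provider %q is unknown or its package was not imported by the test suite", name)
	}
	return factory()
}
```

 // EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
 // are actually cleaned up. Currently only implemented for GCE/GKE.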
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error { - if TestContext.Provider == "gce" || TestContext.Provider == "gke" { - return ensureGCELoadBalancerResourcesDeleted(ip, portRange) - } - return nil -} - -func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error { - gceCloud, err := GetGCECloud() - if err != nil { - return err - } - project := TestContext.CloudConfig.ProjectID - region, err := gcecloud.GetGCERegion(TestContext.CloudConfig.Zone) - if err != nil { - return fmt.Errorf("could not get region for zone %q: %v", TestContext.CloudConfig.Zone, err) - } - - return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) { - service := gceCloud.ComputeServices().GA - list, err := service.ForwardingRules.List(project, region).Do() - if err != nil { - return false, err - } - for _, item := range list.Items { - if item.PortRange == portRange && item.IPAddress == ip { - Logf("found a load balancer: %v", item) - return false, nil - } - } - return true, nil - }) + return TestContext.CloudConfig.Provider.EnsureLoadBalancerResourcesDeleted(ip, portRange) } // The following helper functions can block/unblock network from source @@ -4943,78 +4902,6 @@ func (p *E2ETestNodePreparer) CleanupNodes() error { return encounteredError } -func GetClusterID(c clientset.Interface) (string, error) { - cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{}) - if err != nil || cm == nil { - return "", fmt.Errorf("error getting cluster ID: %v", err) - } - clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster] - providerID, providerIDExists := cm.Data[gcecloud.UIDProvider] - if !clusterIDExists { - return "", fmt.Errorf("cluster ID not set") - } - if providerIDExists { - return providerID, nil - } - return clusterID, nil -} - -// CleanupGCEResources cleans up GCE Service Type=LoadBalancer resources with -// the given name. The name is usually the UUID of the Service prefixed with an -// alpha-numeric character ('a') to work around cloudprovider rules. -func CleanupGCEResources(c clientset.Interface, loadBalancerName, region, zone string) (retErr error) { - gceCloud, err := GetGCECloud() - if err != nil { - return err - } - if region == "" { - // Attempt to parse region from zone if no region is given. 
- region, err = gcecloud.GetGCERegion(zone) - if err != nil { - return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err) - } - } - if err := gceCloud.DeleteFirewall(gcecloud.MakeFirewallName(loadBalancerName)); err != nil && - !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) { - retErr = err - } - if err := gceCloud.DeleteRegionForwardingRule(loadBalancerName, region); err != nil && - !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) { - retErr = fmt.Errorf("%v\n%v", retErr, err) - - } - if err := gceCloud.DeleteRegionAddress(loadBalancerName, region); err != nil && - !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) { - retErr = fmt.Errorf("%v\n%v", retErr, err) - } - clusterID, err := GetClusterID(c) - if err != nil { - retErr = fmt.Errorf("%v\n%v", retErr, err) - return - } - hcNames := []string{gcecloud.MakeNodesHealthCheckName(clusterID)} - hc, getErr := gceCloud.GetHttpHealthCheck(loadBalancerName) - if getErr != nil && !IsGoogleAPIHTTPErrorCode(getErr, http.StatusNotFound) { - retErr = fmt.Errorf("%v\n%v", retErr, getErr) - return - } - if hc != nil { - hcNames = append(hcNames, hc.Name) - } - if err := gceCloud.DeleteExternalTargetPoolAndChecks(&v1.Service{}, loadBalancerName, region, clusterID, hcNames...); err != nil && - !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) { - retErr = fmt.Errorf("%v\n%v", retErr, err) - } - return -} - -// IsHTTPErrorCode returns true if the error is a google api -// error matching the corresponding HTTP error code. -func IsGoogleAPIHTTPErrorCode(err error, code int) bool { - apiErr, ok := err.(*googleapi.Error) - return ok && apiErr.Code == code -} - // getMaster populates the externalIP, internalIP and hostname fields of the master. // If any of these is unavailable, it is set to "". func getMaster(c clientset.Interface) Address { @@ -5173,15 +5060,6 @@ func CreateEmptyFileOnPod(namespace string, podName string, filePath string) err return err } -// GetAzureCloud returns azure cloud provider -func GetAzureCloud() (*azure.Cloud, error) { - cloud, ok := TestContext.CloudConfig.Provider.(*azure.Cloud) - if !ok { - return nil, fmt.Errorf("failed to convert CloudConfig.Provider to Azure: %#v", TestContext.CloudConfig.Provider) - } - return cloud, nil -} - func PrintSummaries(summaries []TestDataSummary, testBaseName string) { now := time.Now() for i := range summaries { diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index fd31dec8454..89b6b032104 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -27,6 +27,7 @@ import ( kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/providers/gce" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -42,9 +43,12 @@ var _ = SIGDescribe("Firewall rule", func() { BeforeEach(func() { framework.SkipUnlessProviderIs("gce") + + var err error cs = f.ClientSet cloudConfig = framework.TestContext.CloudConfig - gceCloud = cloudConfig.Provider.(*gcecloud.GCECloud) + gceCloud, err = gce.GetGCECloud() + Expect(err).NotTo(HaveOccurred()) }) // This test takes around 6 minutes to run @@ -55,7 +59,7 @@ var _ = SIGDescribe("Firewall rule", func() { serviceName := "firewall-test-loadbalancer" By("Getting cluster ID") - clusterID, err := framework.GetClusterID(cs) + clusterID, err := gce.GetClusterID(cs) Expect(err).NotTo(HaveOccurred()) framework.Logf("Got cluster ID: %v", clusterID) @@ -70,7 +74,7 @@ var _ = SIGDescribe("Firewall rule", func() { By("Creating a LoadBalancer type service with ExternalTrafficPolicy=Global") svc := jig.CreateLoadBalancerService(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) { - svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: framework.FirewallTestHttpPort}} + svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: gce.FirewallTestHttpPort}} svc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges }) defer func() { @@ -80,23 +84,23 @@ var _ = SIGDescribe("Firewall rule", func() { }) Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) By("Waiting for the local traffic health check firewall rule to be deleted") - localHCFwName := framework.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false) - _, err := framework.WaitForFirewallRule(gceCloud, localHCFwName, false, framework.LoadBalancerCleanupTimeout) + localHCFwName := gce.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false) + _, err := gce.WaitForFirewallRule(gceCloud, localHCFwName, false, framework.LoadBalancerCleanupTimeout) Expect(err).NotTo(HaveOccurred()) }() svcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP By("Checking if service's firewall rule is correct") - lbFw := framework.ConstructFirewallForLBService(svc, cloudConfig.NodeTag) + lbFw := gce.ConstructFirewallForLBService(svc, cloudConfig.NodeTag) fw, err := gceCloud.GetFirewall(lbFw.Name) Expect(err).NotTo(HaveOccurred()) - Expect(framework.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) + Expect(gce.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) By("Checking if service's nodes health check firewall rule is correct") - nodesHCFw := framework.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, true) + nodesHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, true) fw, err = gceCloud.GetFirewall(nodesHCFw.Name) Expect(err).NotTo(HaveOccurred()) - Expect(framework.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) + Expect(gce.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) // OnlyLocal service is needed to examine which exact nodes the requests are being forwarded to by the Load Balancer on GCE By("Updating LoadBalancer service to ExternalTrafficPolicy=Local") @@ -105,19 +109,19 @@ var _ = SIGDescribe("Firewall rule", func() { }) By("Waiting for the nodes health check firewall rule to be deleted") - _, err = framework.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, framework.LoadBalancerCleanupTimeout) + _, err = 
gce.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, framework.LoadBalancerCleanupTimeout) Expect(err).NotTo(HaveOccurred()) By("Waiting for the correct local traffic health check firewall rule to be created") - localHCFw := framework.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false) - fw, err = framework.WaitForFirewallRule(gceCloud, localHCFw.Name, true, framework.LoadBalancerCreateTimeoutDefault) + localHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false) + fw, err = gce.WaitForFirewallRule(gceCloud, localHCFw.Name, true, framework.LoadBalancerCreateTimeoutDefault) Expect(err).NotTo(HaveOccurred()) - Expect(framework.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) + Expect(gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) By(fmt.Sprintf("Creating netexec pods on at most %v nodes", framework.MaxNodesForEndpointsTests)) for i, nodeName := range nodesNames { podName := fmt.Sprintf("netexec%v", i) - jig.LaunchNetexecPodOnNode(f, nodeName, podName, framework.FirewallTestHttpPort, framework.FirewallTestUdpPort, true) + jig.LaunchNetexecPodOnNode(f, nodeName, podName, gce.FirewallTestHttpPort, gce.FirewallTestUdpPort, true) defer func() { framework.Logf("Cleaning up the netexec pod: %v", podName) Expect(cs.CoreV1().Pods(ns).Delete(podName, nil)).NotTo(HaveOccurred()) @@ -126,7 +130,7 @@ var _ = SIGDescribe("Firewall rule", func() { // Send requests from outside of the cluster because internal traffic is whitelisted By("Accessing the external service ip from outside, all non-master nodes should be reached") - Expect(framework.TestHitNodesFromOutside(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred()) + Expect(framework.TestHitNodesFromOutside(svcExternalIP, gce.FirewallTestHttpPort, gce.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred()) // Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster // by removing the tag on one vm and make sure it doesn't get any traffic. 
This is an imperfect
@@ -140,17 +144,17 @@ var _ = SIGDescribe("Firewall rule", func() {
 		if zoneInLabel, ok := nodeList.Items[0].Labels[kubeletapis.LabelZoneFailureDomain]; ok {
 			zone = zoneInLabel
 		}
-		removedTags := framework.SetInstanceTags(cloudConfig, nodesNames[0], zone, []string{})
+		removedTags := gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, []string{})
 		defer func() {
 			By("Adding tags back to the node and waiting until the traffic is recovered")
 			nodesSet.Insert(nodesNames[0])
-			framework.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags)
+			gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags)
 			// Make sure traffic is recovered before exit
-			Expect(framework.TestHitNodesFromOutside(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred())
+			Expect(framework.TestHitNodesFromOutside(svcExternalIP, gce.FirewallTestHttpPort, gce.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred())
 		}()
 
 		By("Accessing service through the external ip and examining that the node without tags got no response")
-		Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet, 15)).NotTo(HaveOccurred())
+		Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, gce.FirewallTestHttpPort, gce.FirewallTimeoutDefault, nodesSet, 15)).NotTo(HaveOccurred())
 	})
 
 	It("should have correct firewall rules for e2e cluster", func() {
@@ -160,25 +164,25 @@ var _ = SIGDescribe("Firewall rule", func() {
 		}
 
 		By("Checking if e2e firewall rules are correct")
-		for _, expFw := range framework.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) {
+		for _, expFw := range gce.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) {
 			fw, err := gceCloud.GetFirewall(expFw.Name)
 			Expect(err).NotTo(HaveOccurred())
-			Expect(framework.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
+			Expect(gce.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
 		}
 
 		By("Checking well known ports on master and nodes are not exposed externally")
 		nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP)
 		Expect(len(nodeAddrs)).NotTo(BeZero())
 		masterAddr := framework.GetMasterAddress(cs)
-		flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.InsecureKubeControllerManagerPort, framework.FirewallTestTcpTimeout)
+		flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.InsecureKubeControllerManagerPort, gce.FirewallTestTcpTimeout)
 		Expect(flag).To(BeTrue())
-		flag, _ = framework.TestNotReachableHTTPTimeout(masterAddr, ports.SchedulerPort, framework.FirewallTestTcpTimeout)
+		flag, _ = framework.TestNotReachableHTTPTimeout(masterAddr, ports.SchedulerPort, gce.FirewallTestTcpTimeout)
 		Expect(flag).To(BeTrue())
-		flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletPort, framework.FirewallTestTcpTimeout)
+		flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletPort, gce.FirewallTestTcpTimeout)
 		Expect(flag).To(BeTrue())
-		flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletReadOnlyPort, framework.FirewallTestTcpTimeout)
+		flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletReadOnlyPort, gce.FirewallTestTcpTimeout)
 		Expect(flag).To(BeTrue())
-		flag, _ = 
framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.ProxyStatusPort, framework.FirewallTestTcpTimeout) + flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.ProxyStatusPort, gce.FirewallTestTcpTimeout) Expect(flag).To(BeTrue()) }) }) diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index 33e326247da..7d2aa8f1cdc 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -35,8 +35,9 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/serviceaccount" - gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/ingress" + "k8s.io/kubernetes/test/e2e/framework/providers/gce" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -46,14 +47,14 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { defer GinkgoRecover() var ( ns string - jig *framework.IngressTestJig - conformanceTests []framework.IngressConformanceTests + jig *ingress.IngressTestJig + conformanceTests []ingress.IngressConformanceTests cloudConfig framework.CloudConfig ) f := framework.NewDefaultFramework("ingress") BeforeEach(func() { - jig = framework.NewIngressTestJig(f.ClientSet) + jig = ingress.NewIngressTestJig(f.ClientSet) ns = f.Namespace.Name cloudConfig = framework.TestContext.CloudConfig @@ -76,13 +77,13 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // Slow by design ~10m for each "It" block dominated by loadbalancer setup time // TODO: write similar tests for nginx, haproxy and AWS Ingress. Describe("GCE [Slow] [Feature:Ingress]", func() { - var gceController *framework.GCEIngressController + var gceController *gce.GCEIngressController // Platform specific setup BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "gke") By("Initializing gce controller") - gceController = &framework.GCEIngressController{ + gceController = &gce.GCEIngressController{ Ns: ns, Client: jig.Client, Cloud: framework.TestContext.CloudConfig, @@ -108,7 +109,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { }) It("should conform to Ingress spec", func() { - conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{}) + conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{}) for _, t := range conformanceTests { By(t.EntryLog) t.Execute() @@ -128,13 +129,13 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { nodeTags := []string{cloudConfig.NodeTag} if framework.TestContext.Provider != "gce" { // nodeTags would be different in GKE. - nodeTags = framework.GetNodeTags(jig.Client, cloudConfig) + nodeTags = gce.GetNodeTags(jig.Client, cloudConfig) } - expFw := jig.ConstructFirewallForIngress(gceController, nodeTags) + expFw := jig.ConstructFirewallForIngress(gceController.GetFirewallRuleName(), nodeTags) // Passed the last argument as `true` to verify the backend ports is a subset // of the allowed ports in firewall rule, given there may be other existing // ingress resources and backends we are not aware of. - Expect(framework.VerifyFirewallRule(fw, expFw, gceController.Cloud.Network, true)).NotTo(HaveOccurred()) + Expect(gce.VerifyFirewallRule(fw, expFw, gceController.Cloud.Network, true)).NotTo(HaveOccurred()) // TODO: uncomment the restart test once we have a way to synchronize // and know that the controller has resumed watching. 
If we delete @@ -211,7 +212,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { Expect(err).NotTo(HaveOccurred()) By("Creating a basic HTTP ingress and wait for it to come up") - jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http"), ns, nil, nil) + jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, nil, nil) jig.WaitForIngress(true) By("Updating the path on ingress and wait for it to take effect") @@ -239,11 +240,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { It("should not reconcile manually modified health check for ingress", func() { By("Creating a basic HTTP ingress and wait for it to come up.") - jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http"), ns, nil, nil) + jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, nil, nil) jig.WaitForIngress(true) // Get cluster UID. - clusterID, err := framework.GetClusterID(f.ClientSet) + clusterID, err := gce.GetClusterID(f.ClientSet) Expect(err).NotTo(HaveOccurred()) // Get the related nodeports. nodePorts := jig.GetIngressNodePorts(false) @@ -251,7 +252,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // Filter health check using cluster UID as the suffix. By("Retrieving relevant health check resources from GCE.") - gceCloud := gceController.Cloud.Provider.(*gcecloud.GCECloud) + gceCloud, err := gce.GetGCECloud() + Expect(err).NotTo(HaveOccurred()) hcs, err := gceCloud.ListHealthChecks() Expect(err).NotTo(HaveOccurred()) var hcToChange *compute.HealthCheck @@ -314,8 +316,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { It("should support multiple TLS certs", func() { By("Creating an ingress with no certs.") - jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "multiple-certs"), ns, map[string]string{ - framework.IngressStaticIPKey: ns, + jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "multiple-certs"), ns, map[string]string{ + ingress.IngressStaticIPKey: ns, }, map[string]string{}) By("Adding multiple certs to the ingress.") @@ -350,8 +352,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { It("multicluster ingress should get instance group annotation", func() { name := "echomap" - jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http"), ns, map[string]string{ - framework.IngressClassKey: framework.MulticlusterIngressClassValue, + jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, map[string]string{ + ingress.IngressClassKey: ingress.MulticlusterIngressClassValue, }, map[string]string{}) By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name)) @@ -359,25 +361,25 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) framework.ExpectNoError(err) annotations := ing.Annotations - if annotations == nil || annotations[framework.InstanceGroupAnnotation] == "" { - framework.Logf("Waiting for ingress to get %s annotation. Found annotations: %v", framework.InstanceGroupAnnotation, annotations) + if annotations == nil || annotations[ingress.InstanceGroupAnnotation] == "" { + framework.Logf("Waiting for ingress to get %s annotation. 
Found annotations: %v", ingress.InstanceGroupAnnotation, annotations) return false, nil } return true, nil }) if pollErr != nil { - framework.ExpectNoError(fmt.Errorf("Timed out waiting for ingress %s to get %s annotation", name, framework.InstanceGroupAnnotation)) + framework.ExpectNoError(fmt.Errorf("Timed out waiting for ingress %s to get %s annotation", name, ingress.InstanceGroupAnnotation)) } // Verify that the ingress does not get other annotations like url-map, target-proxy, backends, etc. // Note: All resources except the firewall rule have an annotation. - umKey := framework.StatusPrefix + "/url-map" - fwKey := framework.StatusPrefix + "/forwarding-rule" - tpKey := framework.StatusPrefix + "/target-proxy" - fwsKey := framework.StatusPrefix + "/https-forwarding-rule" - tpsKey := framework.StatusPrefix + "/https-target-proxy" - scKey := framework.StatusPrefix + "/ssl-cert" - beKey := framework.StatusPrefix + "/backends" + umKey := ingress.StatusPrefix + "/url-map" + fwKey := ingress.StatusPrefix + "/forwarding-rule" + tpKey := ingress.StatusPrefix + "/target-proxy" + fwsKey := ingress.StatusPrefix + "/https-forwarding-rule" + tpsKey := ingress.StatusPrefix + "/https-target-proxy" + scKey := ingress.StatusPrefix + "/ssl-cert" + beKey := ingress.StatusPrefix + "/backends" wait.Poll(2*time.Second, time.Minute, func() (bool, error) { ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -423,7 +425,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { httpsScheme := "request_scheme=https" By("Create a basic HTTP2 ingress") - jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http2"), ns, map[string]string{}, map[string]string{}) + jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http2"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) address, err := jig.WaitForIngressAddress(jig.Client, jig.Ingress.Namespace, jig.Ingress.Name, framework.LoadBalancerPollTimeout) @@ -435,7 +437,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) for _, svc := range svcList.Items { - svc.Annotations[framework.ServiceApplicationProtocolKey] = `{"http2":"HTTPS"}` + svc.Annotations[ingress.ServiceApplicationProtocolKey] = `{"http2":"HTTPS"}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) Expect(err).NotTo(HaveOccurred()) } @@ -445,7 +447,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) for _, svc := range svcList.Items { - svc.Annotations[framework.ServiceApplicationProtocolKey] = `{"http2":"HTTP2"}` + svc.Annotations[ingress.ServiceApplicationProtocolKey] = `{"http2":"HTTP2"}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) Expect(err).NotTo(HaveOccurred()) } @@ -457,13 +459,13 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { }) Describe("GCE [Slow] [Feature:NEG]", func() { - var gceController *framework.GCEIngressController + var gceController *gce.GCEIngressController // Platform specific setup BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "gke") By("Initializing gce controller") - gceController = &framework.GCEIngressController{ + gceController = &gce.GCEIngressController{ Ns: ns, Client: jig.Client, Cloud: framework.TestContext.CloudConfig, @@ -490,8 +492,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { It("should conform 
to Ingress spec", func() { jig.PollInterval = 5 * time.Second - conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{ - framework.NEGAnnotation: `{"ingress": true}`, + conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{ + ingress.NEGAnnotation: `{"ingress": true}`, }) for _, t := range conformanceTests { By(t.EntryLog) @@ -507,7 +509,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { It("should be able to switch between IG and NEG modes", func() { var err error By("Create a basic HTTP ingress using NEG") - jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) + jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)) Expect(err).NotTo(HaveOccurred()) @@ -517,7 +519,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) for _, svc := range svcList.Items { - svc.Annotations[framework.NEGAnnotation] = `{"ingress": false}` + svc.Annotations[ingress.NEGAnnotation] = `{"ingress": false}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) Expect(err).NotTo(HaveOccurred()) } @@ -530,7 +532,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) for _, svc := range svcList.Items { - svc.Annotations[framework.NEGAnnotation] = `{"ingress": true}` + svc.Annotations[ingress.NEGAnnotation] = `{"ingress": true}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) Expect(err).NotTo(HaveOccurred()) } @@ -543,7 +545,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { It("should be able to create a ClusterIP service", func() { var err error By("Create a basic HTTP ingress using NEG") - jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{}) + jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) svcPorts := jig.GetServicePorts(false) usingNEG, err := gceController.BackendServiceUsingNEG(svcPorts) @@ -566,7 +568,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { _, err = f.ClientSet.ExtensionsV1beta1().Deployments(ns).UpdateScale(name, scale) Expect(err).NotTo(HaveOccurred()) } - wait.Poll(10*time.Second, framework.NEGUpdateTimeout, func() (bool, error) { + wait.Poll(10*time.Second, ingress.NEGUpdateTimeout, func() (bool, error) { res, err := jig.GetDistinctResponseFromIngress() if err != nil { return false, nil @@ -576,7 +578,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { } By("Create a basic HTTP ingress using NEG") - jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) + jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)) Expect(err).NotTo(HaveOccurred()) @@ -601,7 +603,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { name := "hostname" replicas := 8 By("Create a basic HTTP ingress using NEG") - jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg"), ns, 
map[string]string{}, map[string]string{}) + jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)) Expect(err).NotTo(HaveOccurred()) @@ -661,15 +663,15 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { _, err = f.ClientSet.ExtensionsV1beta1().Deployments(ns).UpdateScale(name, scale) Expect(err).NotTo(HaveOccurred()) } - wait.Poll(10*time.Second, framework.NEGUpdateTimeout, func() (bool, error) { + wait.Poll(10*time.Second, ingress.NEGUpdateTimeout, func() (bool, error) { svc, err := f.ClientSet.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - var status framework.NegStatus - v, ok := svc.Annotations[framework.NEGStatusAnnotation] + var status ingress.NegStatus + v, ok := svc.Annotations[ingress.NEGStatusAnnotation] if !ok { // Wait for NEG sync loop to find NEGs - framework.Logf("Waiting for %v, got: %+v", framework.NEGStatusAnnotation, svc.Annotations) + framework.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations) return false, nil } err = json.Unmarshal([]byte(v), &status) @@ -677,7 +679,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { framework.Logf("Error in parsing Expose NEG annotation: %v", err) return false, nil } - framework.Logf("Got %v: %v", framework.NEGStatusAnnotation, v) + framework.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v) // Expect 2 NEGs to be created based on the test setup (neg-exposed) if len(status.NetworkEndpointGroups) != 2 { @@ -695,7 +697,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { framework.Logf("Expected length of %+v to equal length of %+v, but does not", status.NetworkEndpointGroups, expectedKeys) } - gceCloud := gceController.Cloud.Provider.(*gcecloud.GCECloud) + gceCloud, err := gce.GetGCECloud() + Expect(err).NotTo(HaveOccurred()) for _, neg := range status.NetworkEndpointGroups { networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false) Expect(err).NotTo(HaveOccurred()) @@ -710,7 +713,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { } By("Create a basic HTTP ingress using NEG") - jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) + jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)) Expect(err).NotTo(HaveOccurred()) @@ -733,16 +736,16 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { }) Describe("GCE [Slow] [Feature:kubemci]", func() { - var gceController *framework.GCEIngressController + var gceController *gce.GCEIngressController var ipName, ipAddress string // Platform specific setup BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "gke") - jig.Class = framework.MulticlusterIngressClassValue + jig.Class = ingress.MulticlusterIngressClassValue jig.PollInterval = 5 * time.Second By("Initializing gce controller") - gceController = &framework.GCEIngressController{ + gceController = &gce.GCEIngressController{ Ns: ns, Client: jig.Client, Cloud: framework.TestContext.CloudConfig, @@ -775,8 +778,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { }) It("should conform to Ingress spec", func() { - conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{ - 
framework.IngressStaticIPKey: ipName, + conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{ + ingress.IngressStaticIPKey: ipName, }) for _, t := range conformanceTests { By(t.EntryLog) @@ -800,9 +803,9 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { It("should remove clusters as expected", func() { ingAnnotations := map[string]string{ - framework.IngressStaticIPKey: ipName, + ingress.IngressStaticIPKey: ipName, } - ingFilePath := filepath.Join(framework.IngressManifestPath, "http") + ingFilePath := filepath.Join(ingress.IngressManifestPath, "http") jig.CreateIngress(ingFilePath, ns, ingAnnotations, map[string]string{}) jig.WaitForIngress(false /*waitForNodePort*/) name := jig.Ingress.Name @@ -830,7 +833,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { It("single and multi-cluster ingresses should be able to exist together", func() { By("Creating a single cluster ingress first") jig.Class = "" - singleIngFilePath := filepath.Join(framework.IngressManifestPath, "static-ip-2") + singleIngFilePath := filepath.Join(ingress.IngressManifestPath, "static-ip-2") jig.CreateIngress(singleIngFilePath, ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(false /*waitForNodePort*/) // jig.Ingress will be overwritten when we create MCI, so keep a reference. @@ -838,11 +841,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // Create the multi-cluster ingress next. By("Creating a multi-cluster ingress next") - jig.Class = framework.MulticlusterIngressClassValue + jig.Class = ingress.MulticlusterIngressClassValue ingAnnotations := map[string]string{ - framework.IngressStaticIPKey: ipName, + ingress.IngressStaticIPKey: ipName, } - multiIngFilePath := filepath.Join(framework.IngressManifestPath, "http") + multiIngFilePath := filepath.Join(ingress.IngressManifestPath, "http") jig.CreateIngress(multiIngFilePath, ns, ingAnnotations, map[string]string{}) jig.WaitForIngress(false /*waitForNodePort*/) mciIngress := jig.Ingress @@ -852,7 +855,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { jig.Class = "" jig.TryDeleteIngress() jig.Ingress = mciIngress - jig.Class = framework.MulticlusterIngressClassValue + jig.Class = ingress.MulticlusterIngressClassValue jig.WaitForIngress(false /*waitForNodePort*/) By("Cleanup: Deleting the multi-cluster ingress") @@ -862,19 +865,19 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // Time: borderline 5m, slow by design Describe("[Slow] Nginx", func() { - var nginxController *framework.NginxIngressController + var nginxController *ingress.NginxIngressController BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "gke") By("Initializing nginx controller") jig.Class = "nginx" - nginxController = &framework.NginxIngressController{Ns: ns, Client: jig.Client} + nginxController = &ingress.NginxIngressController{Ns: ns, Client: jig.Client} // TODO: This test may fail on other platforms. We can simply skip it // but we want to allow easy testing where a user might've hand // configured firewalls. 
if framework.ProviderIs("gce", "gke") { - framework.ExpectNoError(framework.GcloudComputeResourceCreate("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID, "--allow", "tcp:80,tcp:443", "--network", framework.TestContext.CloudConfig.Network)) + framework.ExpectNoError(gce.GcloudComputeResourceCreate("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID, "--allow", "tcp:80,tcp:443", "--network", framework.TestContext.CloudConfig.Network)) } else { framework.Logf("WARNING: Not running on GCE/GKE, cannot create firewall rules for :80, :443. Assuming traffic can reach the external ips of all nodes in cluster on those ports.") } @@ -884,7 +887,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { AfterEach(func() { if framework.ProviderIs("gce", "gke") { - framework.ExpectNoError(framework.GcloudComputeResourceDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID)) + framework.ExpectNoError(gce.GcloudComputeResourceDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID)) } if CurrentGinkgoTestDescription().Failed { framework.DescribeIng(ns) @@ -901,7 +904,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // Poll more frequently to reduce e2e completion time. // This test runs in presubmit. jig.PollInterval = 5 * time.Second - conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{}) + conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{}) for _, t := range conformanceTests { By(t.EntryLog) t.Execute() @@ -923,13 +926,13 @@ func verifyKubemciStatusHas(name, expectedSubStr string) { } } -func executePresharedCertTest(f *framework.Framework, jig *framework.IngressTestJig, staticIPName string) { +func executePresharedCertTest(f *framework.Framework, jig *ingress.IngressTestJig, staticIPName string) { preSharedCertName := "test-pre-shared-cert" By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName)) testHostname := "test.ingress.com" - cert, key, err := framework.GenerateRSACerts(testHostname, true) + cert, key, err := ingress.GenerateRSACerts(testHostname, true) Expect(err).NotTo(HaveOccurred()) - gceCloud, err := framework.GetGCECloud() + gceCloud, err := gce.GetGCECloud() Expect(err).NotTo(HaveOccurred()) defer func() { // We would not be able to delete the cert until ingress controller @@ -959,36 +962,36 @@ func executePresharedCertTest(f *framework.Framework, jig *framework.IngressTest By("Creating an ingress referencing the pre-shared certificate") // Create an ingress referencing this cert using pre-shared-cert annotation. ingAnnotations := map[string]string{ - framework.IngressPreSharedCertKey: preSharedCertName, + ingress.IngressPreSharedCertKey: preSharedCertName, // Disallow HTTP to save resources. This is irrelevant to the // pre-shared cert test. 
- framework.IngressAllowHTTPKey: "false", + ingress.IngressAllowHTTPKey: "false", } if staticIPName != "" { - ingAnnotations[framework.IngressStaticIPKey] = staticIPName + ingAnnotations[ingress.IngressStaticIPKey] = staticIPName } - jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{}) + jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{}) By("Test that ingress works with the pre-shared certificate") err = jig.WaitForIngressWithCert(true, []string{testHostname}, cert) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) } -func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *framework.IngressTestJig, ipName, ip string) { - jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "static-ip"), f.Namespace.Name, map[string]string{ - framework.IngressStaticIPKey: ipName, - framework.IngressAllowHTTPKey: "false", +func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.IngressTestJig, ipName, ip string) { + jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "static-ip"), f.Namespace.Name, map[string]string{ + ingress.IngressStaticIPKey: ipName, + ingress.IngressAllowHTTPKey: "false", }, map[string]string{}) By("waiting for Ingress to come up with ip: " + ip) - httpClient := framework.BuildInsecureClient(framework.IngressReqTimeout) + httpClient := ingress.BuildInsecureClient(ingress.IngressReqTimeout) framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false)) By("should reject HTTP traffic") framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true)) } -func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *framework.IngressTestJig, staticIPName string) { +func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.IngressTestJig, staticIPName string) { By("Creating a set of ingress, service and deployment that have backside re-encryption configured") deployCreated, svcCreated, ingCreated, err := jig.SetUpBacksideHTTPSIngress(f.ClientSet, f.Namespace.Name, staticIPName) defer func() { @@ -1004,7 +1007,7 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *framework.Ing Expect(err).NotTo(HaveOccurred(), "Failed to wait for ingress IP") By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP)) - timeoutClient := &http.Client{Timeout: framework.IngressReqTimeout} + timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout} err = wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) { resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "") if err != nil { @@ -1020,8 +1023,8 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *framework.Ing Expect(err).NotTo(HaveOccurred(), "Failed to verify backside re-encryption ingress") } -func detectHttpVersionAndSchemeTest(f *framework.Framework, jig *framework.IngressTestJig, address, version, scheme string) { - timeoutClient := &http.Client{Timeout: framework.IngressReqTimeout} +func detectHttpVersionAndSchemeTest(f *framework.Framework, jig *ingress.IngressTestJig, address, version, scheme string) { + timeoutClient := &http.Client{Timeout: 
ingress.IngressReqTimeout} resp := "" err := wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) { resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", address), "") diff --git a/test/e2e/network/network_tiers.go b/test/e2e/network/network_tiers.go index 7f7c808df51..3cad210e9c1 100644 --- a/test/e2e/network/network_tiers.go +++ b/test/e2e/network/network_tiers.go @@ -30,6 +30,7 @@ import ( gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/providers/gce" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -53,7 +54,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { } for _, lb := range serviceLBNames { framework.Logf("cleaning gce resource for %s", lb) - framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone) + framework.TestContext.CloudConfig.Provider.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone) } //reset serviceLBNames serviceLBNames = []string{} @@ -102,7 +103,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { // Test 3: create a standard-tierd LB with a user-requested IP. By("reserving a static IP for the load balancer") requestedAddrName := fmt.Sprintf("e2e-ext-lb-net-tier-%s", framework.RunId) - gceCloud, err := framework.GetGCECloud() + gceCloud, err := gce.GetGCECloud() Expect(err).NotTo(HaveOccurred()) requestedIP, err := reserveAlphaRegionalAddress(gceCloud, requestedAddrName, cloud.NetworkTierStandard) Expect(err).NotTo(HaveOccurred(), "failed to reserve a STANDARD tiered address") @@ -187,7 +188,7 @@ func getLBNetworkTierByIP(ip string) (cloud.NetworkTier, error) { } func getGCEForwardingRuleByIP(ip string) (*computealpha.ForwardingRule, error) { - cloud, err := framework.GetGCECloud() + cloud, err := gce.GetGCECloud() if err != nil { return nil, err } diff --git a/test/e2e/network/scale/ingress.go b/test/e2e/network/scale/ingress.go index 54262051890..ce8d43c5ca5 100644 --- a/test/e2e/network/scale/ingress.go +++ b/test/e2e/network/scale/ingress.go @@ -29,6 +29,8 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/ingress" + "k8s.io/kubernetes/test/e2e/framework/providers/gce" ) const ( @@ -59,10 +61,10 @@ var ( // IngressScaleFramework defines the framework for ingress scale testing. type IngressScaleFramework struct { Clientset clientset.Interface - Jig *framework.IngressTestJig - GCEController *framework.GCEIngressController + Jig *ingress.IngressTestJig + GCEController *gce.GCEIngressController CloudConfig framework.CloudConfig - Logger framework.TestLogger + Logger ingress.TestLogger Namespace string EnableTLS bool @@ -92,7 +94,7 @@ func NewIngressScaleFramework(cs clientset.Interface, ns string, cloudConfig fra Namespace: ns, Clientset: cs, CloudConfig: cloudConfig, - Logger: &framework.E2ELogger{}, + Logger: &ingress.E2ELogger{}, EnableTLS: true, NumIngressesTest: []int{ numIngressesSmall, @@ -106,10 +108,10 @@ func NewIngressScaleFramework(cs clientset.Interface, ns string, cloudConfig fra // PrepareScaleTest prepares framework for ingress scale testing. 
func (f *IngressScaleFramework) PrepareScaleTest() error { f.Logger.Infof("Initializing ingress test suite and gce controller...") - f.Jig = framework.NewIngressTestJig(f.Clientset) + f.Jig = ingress.NewIngressTestJig(f.Clientset) f.Jig.Logger = f.Logger f.Jig.PollInterval = scaleTestPollInterval - f.GCEController = &framework.GCEIngressController{ + f.GCEController = &gce.GCEIngressController{ Client: f.Clientset, Cloud: f.CloudConfig, } diff --git a/test/e2e/network/scale/localrun/ingress_scale.go b/test/e2e/network/scale/localrun/ingress_scale.go index 62c44e31614..0fc38675eb5 100644 --- a/test/e2e/network/scale/localrun/ingress_scale.go +++ b/test/e2e/network/scale/localrun/ingress_scale.go @@ -33,6 +33,8 @@ import ( gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/ingress" + "k8s.io/kubernetes/test/e2e/framework/providers/gce" "k8s.io/kubernetes/test/e2e/network/scale" ) @@ -117,7 +119,7 @@ func main() { glog.Errorf("Error building GCE provider: %v", err) os.Exit(1) } - cloudConfig.Provider = gceCloud + cloudConfig.Provider = gce.NewProvider(gceCloud) testSuccessFlag := true defer func() { @@ -150,7 +152,7 @@ func main() { // Setting up a localized scale test framework. f := scale.NewIngressScaleFramework(cs, ns.Name, cloudConfig) - f.Logger = &framework.GLogger{} + f.Logger = &ingress.GLogger{} // Customizing scale test. f.EnableTLS = enableTLS f.OutputFile = outputFile diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 5bac2e89378..91d10bcfb6e 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/controller/endpoint" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/providers/gce" imageutils "k8s.io/kubernetes/test/utils/image" . "github.com/onsi/ginkgo" @@ -589,7 +590,7 @@ var _ = SIGDescribe("Services", func() { if framework.ProviderIs("gce", "gke") { By("creating a static load balancer IP") staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunId) - gceCloud, err := framework.GetGCECloud() + gceCloud, err := gce.GetGCECloud() Expect(err).NotTo(HaveOccurred()) err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region()) @@ -646,7 +647,7 @@ var _ = SIGDescribe("Services", func() { // coming from, so this is first-aid rather than surgery). By("demoting the static IP to ephemeral") if staticIPName != "" { - gceCloud, err := framework.GetGCECloud() + gceCloud, err := gce.GetGCECloud() Expect(err).NotTo(HaveOccurred()) // Deleting it after it is attached "demotes" it to an // ephemeral IP, which can be auto-released. 
diff --git a/test/e2e/scheduling/ubernetes_lite_volumes.go b/test/e2e/scheduling/ubernetes_lite_volumes.go index 8db74c79688..0493a87866f 100644 --- a/test/e2e/scheduling/ubernetes_lite_volumes.go +++ b/test/e2e/scheduling/ubernetes_lite_volumes.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/providers/gce" ) var _ = SIGDescribe("Multi-AZ Cluster Volumes [sig-storage]", func() { @@ -59,7 +60,7 @@ var _ = SIGDescribe("Multi-AZ Cluster Volumes [sig-storage]", func() { // OnlyAllowNodeZones tests that GetAllCurrentZones returns only zones with Nodes func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) { - gceCloud, err := framework.GetGCECloud() + gceCloud, err := gce.GetGCECloud() Expect(err).NotTo(HaveOccurred()) // Get all the zones that the nodes are in diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go index 50b94a6c0c6..3b7543b1091 100644 --- a/test/e2e/storage/pd.go +++ b/test/e2e/storage/pd.go @@ -39,6 +39,7 @@ import ( clientset "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/providers/gce" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -385,7 +386,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { if disruptOp == deleteNode { By("getting gce instances") - gceCloud, err := framework.GetGCECloud() + gceCloud, err := gce.GetGCECloud() framework.ExpectNoError(err, fmt.Sprintf("Unable to create gcloud client err=%v", err)) output, err := gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone) framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output)) @@ -476,7 +477,7 @@ func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName func detachPD(nodeName types.NodeName, pdName string) error { if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" { - gceCloud, err := framework.GetGCECloud() + gceCloud, err := gce.GetGCECloud() if err != nil { return err } @@ -580,7 +581,7 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num func waitForPDDetach(diskName string, nodeName types.NodeName) error { if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" { framework.Logf("Waiting for GCE PD %q to detach from node %q.", diskName, nodeName) - gceCloud, err := framework.GetGCECloud() + gceCloud, err := gce.GetGCECloud() if err != nil { return err } diff --git a/test/e2e/storage/persistent_volumes-gce.go b/test/e2e/storage/persistent_volumes-gce.go index d860ce009e3..e3a6b422725 100644 --- a/test/e2e/storage/persistent_volumes-gce.go +++ b/test/e2e/storage/persistent_volumes-gce.go @@ -26,12 +26,13 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/providers/gce" "k8s.io/kubernetes/test/e2e/storage/utils" ) // verifyGCEDiskAttached performs a sanity check to verify the PD attached to the node func verifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool { - gceCloud, err := framework.GetGCECloud() + gceCloud, err := gce.GetGCECloud() Expect(err).NotTo(HaveOccurred()) isAttached, err := 
gceCloud.DiskIsAttached(diskName, nodeName) Expect(err).NotTo(HaveOccurred()) diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index baf0dd77989..4fc0cfd24f9 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -37,6 +37,7 @@ import ( kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/providers/gce" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -210,7 +211,7 @@ func testZonalFailover(c clientset.Interface, ns string) { waitStatus <- waitForStatefulSetReplicasNotReady(statefulSet.Name, ns, c) }() - cloud, err := framework.GetGCECloud() + cloud, err := gce.GetGCECloud() if err != nil { Expect(err).NotTo(HaveOccurred()) } diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index c75988a6378..6feaed756b5 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -46,6 +46,7 @@ import ( kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/providers/gce" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -332,7 +333,7 @@ func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool) } func checkGCEPD(volume *v1.PersistentVolume, volumeType string) error { - cloud, err := framework.GetGCECloud() + cloud, err := gce.GetGCECloud() if err != nil { return err } @@ -609,7 +610,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { allZones := sets.NewString() // all zones in the project managedZones := sets.NewString() // subset of allZones - gceCloud, err := framework.GetGCECloud() + gceCloud, err := gce.GetGCECloud() Expect(err).NotTo(HaveOccurred()) // Get all k8s managed zones (same as zones with nodes in them for test) diff --git a/test/e2e/upgrades/ingress.go b/test/e2e/upgrades/ingress.go index 5070099ad2e..fbaacc3b7d3 100644 --- a/test/e2e/upgrades/ingress.go +++ b/test/e2e/upgrades/ingress.go @@ -31,6 +31,8 @@ import ( "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/ingress" + "k8s.io/kubernetes/test/e2e/framework/providers/gce" ) // Dependent on "static-ip-2" manifests @@ -39,10 +41,10 @@ const host = "ingress.test.com" // IngressUpgradeTest adapts the Ingress e2e for upgrade testing type IngressUpgradeTest struct { - gceController *framework.GCEIngressController + gceController *gce.GCEIngressController // holds GCP resources pre-upgrade resourceStore *GCPResourceStore - jig *framework.IngressTestJig + jig *ingress.IngressTestJig httpClient *http.Client ip string ipName string @@ -73,12 +75,12 @@ func (t *IngressUpgradeTest) Setup(f *framework.Framework) { framework.SkipUnlessProviderIs("gce", "gke") // jig handles all Kubernetes testing logic - jig := framework.NewIngressTestJig(f.ClientSet) + jig := ingress.NewIngressTestJig(f.ClientSet) ns := f.Namespace // gceController handles all cloud testing logic - gceController := &framework.GCEIngressController{ + gceController := &gce.GCEIngressController{ Ns: ns.Name, Client: jig.Client, Cloud: framework.TestContext.CloudConfig, @@ -87,7 +89,7 @@ func (t *IngressUpgradeTest) Setup(f *framework.Framework) { t.gceController = gceController t.jig 
= jig - t.httpClient = framework.BuildInsecureClient(framework.IngressReqTimeout) + t.httpClient = ingress.BuildInsecureClient(ingress.IngressReqTimeout) // Allocate a static-ip for the Ingress, this IP is cleaned up via CleanupGCEIngressController t.ipName = fmt.Sprintf("%s-static-ip", ns.Name) @@ -95,9 +97,9 @@ func (t *IngressUpgradeTest) Setup(f *framework.Framework) { // Create a working basic Ingress By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", t.ipName, t.ip)) - jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "static-ip-2"), ns.Name, map[string]string{ - framework.IngressStaticIPKey: t.ipName, - framework.IngressAllowHTTPKey: "false", + jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "static-ip-2"), ns.Name, map[string]string{ + ingress.IngressStaticIPKey: t.ipName, + ingress.IngressAllowHTTPKey: "false", }, map[string]string{}) t.jig.SetHTTPS("tls-secret", "ingress.test.com") diff --git a/test/e2e_node/e2e_node_suite_test.go b/test/e2e_node/e2e_node_suite_test.go index 3cd0c01a7c1..2b1fce9fd0c 100644 --- a/test/e2e_node/e2e_node_suite_test.go +++ b/test/e2e_node/e2e_node_suite_test.go @@ -71,6 +71,7 @@ func init() { // It seems that someone is using flag.Parse() after init() and TestMain(). // TODO(random-liu): Find who is using flag.Parse() and cause errors and move the following logic // into TestContext. + // TODO(pohly): remove RegisterNodeFlags from test_context.go enable Viper config support here? } func TestMain(m *testing.M) { From 97101a6a86ea54959aa0eb20f3154f2d5fc290d6 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Thu, 27 Sep 2018 13:05:44 +0200 Subject: [PATCH 02/19] e2e: update bazel BUILD files Generated via hack/update-bazel.sh. --- test/e2e/BUILD | 6 ++- test/e2e/framework/BUILD | 34 +++------------ test/e2e/framework/ingress/BUILD | 42 ++++++++++++++++++ test/e2e/framework/providers/aws/BUILD | 32 ++++++++++++++ test/e2e/framework/providers/azure/BUILD | 28 ++++++++++++ test/e2e/framework/providers/gce/BUILD | 48 +++++++++++++++++++++ test/e2e/framework/providers/kubemark/BUILD | 30 +++++++++++++ test/e2e/network/BUILD | 2 + test/e2e/network/scale/BUILD | 2 + test/e2e/network/scale/localrun/BUILD | 2 + test/e2e/scheduling/BUILD | 1 + test/e2e/storage/BUILD | 1 + test/e2e/upgrades/BUILD | 2 + 13 files changed, 201 insertions(+), 29 deletions(-) create mode 100644 test/e2e/framework/ingress/BUILD create mode 100644 test/e2e/framework/providers/aws/BUILD create mode 100644 test/e2e/framework/providers/azure/BUILD create mode 100644 test/e2e/framework/providers/gce/BUILD create mode 100644 test/e2e/framework/providers/kubemark/BUILD diff --git a/test/e2e/BUILD b/test/e2e/BUILD index ffc1b68a627..a7f1864dc93 100644 --- a/test/e2e/BUILD +++ b/test/e2e/BUILD @@ -46,8 +46,6 @@ go_library( importpath = "k8s.io/kubernetes/test/e2e", deps = [ "//pkg/api/v1/pod:go_default_library", - "//pkg/cloudprovider/providers/azure:go_default_library", - "//pkg/cloudprovider/providers/gce:go_default_library", "//pkg/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library", @@ -63,6 +61,10 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/ginkgowrapper:go_default_library", "//test/e2e/framework/metrics:go_default_library", + "//test/e2e/framework/providers/aws:go_default_library", + "//test/e2e/framework/providers/azure:go_default_library", + "//test/e2e/framework/providers/gce:go_default_library", + 
"//test/e2e/framework/providers/kubemark:go_default_library", "//test/e2e/framework/testfiles:go_default_library", "//test/e2e/manifest:go_default_library", "//test/utils:go_default_library", diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index 465d6c8ad0e..11b184cb7f4 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -1,10 +1,6 @@ package(default_visibility = ["//visibility:public"]) -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", @@ -14,13 +10,11 @@ go_library( "crd_util.go", "deployment_util.go", "exec_util.go", - "firewall_util.go", "flake_reporting_util.go", "framework.go", "get-kubemark-resource-usage.go", "google_compute.go", "gpu_util.go", - "ingress_utils.go", "jobs_util.go", "kubelet_stats.go", "log_size_monitoring.go", @@ -30,6 +24,7 @@ go_library( "perf_util.go", "pods.go", "profile_gatherer.go", + "provider.go", "psp_util.go", "pv_util.go", "rc_util.go", @@ -53,9 +48,6 @@ go_library( "//pkg/apis/extensions:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/conditions:go_default_library", - "//pkg/cloudprovider/providers/aws:go_default_library", - "//pkg/cloudprovider/providers/azure:go_default_library", - "//pkg/cloudprovider/providers/gce:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/deployment/util:go_default_library", "//pkg/controller/job:go_default_library", @@ -70,7 +62,6 @@ go_library( "//pkg/kubelet/metrics:go_default_library", "//pkg/kubelet/sysctl:go_default_library", "//pkg/kubelet/util/format:go_default_library", - "//pkg/kubemark:go_default_library", "//pkg/master/ports:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/cache:go_default_library", @@ -117,7 +108,6 @@ go_library( "//staging/src/k8s.io/client-go/discovery:go_default_library", "//staging/src/k8s.io/client-go/discovery/cached:go_default_library", "//staging/src/k8s.io/client-go/dynamic:go_default_library", - "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library", @@ -131,21 +121,14 @@ go_library( "//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library", "//staging/src/k8s.io/client-go/tools/watch:go_default_library", "//staging/src/k8s.io/client-go/util/retry:go_default_library", - "//staging/src/k8s.io/cloud-provider:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library", "//test/e2e/framework/ginkgowrapper:go_default_library", "//test/e2e/framework/metrics:go_default_library", - "//test/e2e/framework/testfiles:go_default_library", "//test/e2e/manifest:go_default_library", "//test/e2e/perftype:go_default_library", "//test/utils:go_default_library", "//test/utils/image:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/service/autoscaling:go_default_library", - 
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/ginkgo/config:go_default_library", @@ -155,8 +138,6 @@ go_library( "//vendor/github.com/prometheus/common/model:go_default_library", "//vendor/golang.org/x/crypto/ssh:go_default_library", "//vendor/golang.org/x/net/websocket:go_default_library", - "//vendor/google.golang.org/api/compute/v1:go_default_library", - "//vendor/google.golang.org/api/googleapi:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) @@ -174,16 +155,15 @@ filegroup( ":package-srcs", "//test/e2e/framework/config:all-srcs", "//test/e2e/framework/ginkgowrapper:all-srcs", + "//test/e2e/framework/ingress:all-srcs", "//test/e2e/framework/metrics:all-srcs", + "//test/e2e/framework/providers/aws:all-srcs", + "//test/e2e/framework/providers/azure:all-srcs", + "//test/e2e/framework/providers/gce:all-srcs", + "//test/e2e/framework/providers/kubemark:all-srcs", "//test/e2e/framework/testfiles:all-srcs", "//test/e2e/framework/timer:all-srcs", "//test/e2e/framework/viperconfig:all-srcs", ], tags = ["automanaged"], ) - -go_test( - name = "go_default_test", - srcs = ["firewall_util_test.go"], - embed = [":go_default_library"], -) diff --git a/test/e2e/framework/ingress/BUILD b/test/e2e/framework/ingress/BUILD new file mode 100644 index 00000000000..c5c46a76011 --- /dev/null +++ b/test/e2e/framework/ingress/BUILD @@ -0,0 +1,42 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["ingress_utils.go"], + importpath = "k8s.io/kubernetes/test/e2e/framework/ingress", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//test/e2e/framework:go_default_library", + "//test/e2e/framework/testfiles:go_default_library", + "//test/e2e/manifest:go_default_library", + "//test/utils:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/onsi/ginkgo:go_default_library", + "//vendor/github.com/onsi/gomega:go_default_library", + "//vendor/google.golang.org/api/compute/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/test/e2e/framework/providers/aws/BUILD b/test/e2e/framework/providers/aws/BUILD new file mode 100644 index 00000000000..cbb3587423a --- /dev/null +++ b/test/e2e/framework/providers/aws/BUILD @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["aws.go"], + importpath = 
"k8s.io/kubernetes/test/e2e/framework/providers/aws", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cloudprovider/providers/aws:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//test/e2e/framework:go_default_library", + "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", + "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", + "//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library", + "//vendor/github.com/aws/aws-sdk-go/service/autoscaling:go_default_library", + "//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/test/e2e/framework/providers/azure/BUILD b/test/e2e/framework/providers/azure/BUILD new file mode 100644 index 00000000000..038c7832e9a --- /dev/null +++ b/test/e2e/framework/providers/azure/BUILD @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["azure.go"], + importpath = "k8s.io/kubernetes/test/e2e/framework/providers/azure", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cloudprovider/providers/azure:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//test/e2e/framework:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/test/e2e/framework/providers/gce/BUILD b/test/e2e/framework/providers/gce/BUILD new file mode 100644 index 00000000000..26f978b789b --- /dev/null +++ b/test/e2e/framework/providers/gce/BUILD @@ -0,0 +1,48 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "firewall.go", + "gce.go", + "ingress.go", + ], + importpath = "k8s.io/kubernetes/test/e2e/framework/providers/gce", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cloudprovider/providers/gce:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", + "//test/e2e/framework:go_default_library", + "//vendor/github.com/onsi/ginkgo:go_default_library", + "//vendor/github.com/onsi/gomega:go_default_library", + "//vendor/google.golang.org/api/compute/v1:go_default_library", + "//vendor/google.golang.org/api/googleapi:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["firewall_test.go"], + embed = [":go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = 
[":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/test/e2e/framework/providers/kubemark/BUILD b/test/e2e/framework/providers/kubemark/BUILD new file mode 100644 index 00000000000..118a61d6fb3 --- /dev/null +++ b/test/e2e/framework/providers/kubemark/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["kubemark.go"], + importpath = "k8s.io/kubernetes/test/e2e/framework/providers/kubemark", + visibility = ["//visibility:public"], + deps = [ + "//pkg/kubemark:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", + "//test/e2e/framework:go_default_library", + "//vendor/github.com/onsi/gomega:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/test/e2e/network/BUILD b/test/e2e/network/BUILD index 7e1bd5a4d2d..663a2e5d6e9 100644 --- a/test/e2e/network/BUILD +++ b/test/e2e/network/BUILD @@ -62,6 +62,8 @@ go_library( "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/ingress:go_default_library", + "//test/e2e/framework/providers/gce:go_default_library", "//test/e2e/network/scale:go_default_library", "//test/images/net/nat:go_default_library", "//test/utils:go_default_library", diff --git a/test/e2e/network/scale/BUILD b/test/e2e/network/scale/BUILD index c3bf5f584d4..f99c7d97ade 100644 --- a/test/e2e/network/scale/BUILD +++ b/test/e2e/network/scale/BUILD @@ -12,6 +12,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/ingress:go_default_library", + "//test/e2e/framework/providers/gce:go_default_library", ], ) diff --git a/test/e2e/network/scale/localrun/BUILD b/test/e2e/network/scale/localrun/BUILD index 667c5e5ffca..e2bffae8757 100644 --- a/test/e2e/network/scale/localrun/BUILD +++ b/test/e2e/network/scale/localrun/BUILD @@ -12,6 +12,8 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/ingress:go_default_library", + "//test/e2e/framework/providers/gce:go_default_library", "//test/e2e/network/scale:go_default_library", "//vendor/github.com/golang/glog:go_default_library", ], diff --git a/test/e2e/scheduling/BUILD b/test/e2e/scheduling/BUILD index 1dcf7f06623..da6de8f703f 100644 --- a/test/e2e/scheduling/BUILD +++ b/test/e2e/scheduling/BUILD @@ -40,6 +40,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/common:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/providers/gce:go_default_library", "//test/utils:go_default_library", "//test/utils/image:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index 
219917f7ca3..abe4cfdbc43 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -71,6 +71,7 @@ go_library( "//staging/src/k8s.io/csi-api/pkg/crd:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/metrics:go_default_library", + "//test/e2e/framework/providers/gce:go_default_library", "//test/e2e/framework/testfiles:go_default_library", "//test/e2e/manifest:go_default_library", "//test/e2e/storage/drivers:go_default_library", diff --git a/test/e2e/upgrades/BUILD b/test/e2e/upgrades/BUILD index 59f50dd916e..9eca273c31d 100644 --- a/test/e2e/upgrades/BUILD +++ b/test/e2e/upgrades/BUILD @@ -38,6 +38,8 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/common:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/ingress:go_default_library", + "//test/e2e/framework/providers/gce:go_default_library", "//test/e2e/framework/testfiles:go_default_library", "//test/e2e/scheduling:go_default_library", "//test/utils/image:go_default_library", From 0dba66a2a1bb32ec2eed8c87931829416522a41d Mon Sep 17 00:00:00 2001 From: xichengliudui Date: Mon, 15 Oct 2018 22:42:41 -0400 Subject: [PATCH 03/19] Delete repeated words and fix misspellings Delete repeated words and fix misspellings --- cmd/kubeadm/app/apis/kubeadm/types.go | 2 +- cmd/kubeadm/app/apis/kubeadm/v1alpha3/types.go | 2 +- cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go | 2 +- cmd/kubeadm/app/util/config/initconfiguration.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/kubeadm/app/apis/kubeadm/types.go b/cmd/kubeadm/app/apis/kubeadm/types.go index d2471c67a19..d2490223ea4 100644 --- a/cmd/kubeadm/app/apis/kubeadm/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/types.go @@ -181,7 +181,7 @@ type NodeRegistrationOptions struct { // KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file // kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap - // Flags have higher higher priority when parsing. These values are local and specific to the node kubeadm is executing on. + // Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. KubeletExtraArgs map[string]string } diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha3/types.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha3/types.go index faa08cfb976..b983efceb2b 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha3/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha3/types.go @@ -162,7 +162,7 @@ type NodeRegistrationOptions struct { // KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file // kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap - // Flags have higher higher priority when parsing. These values are local and specific to the node kubeadm is executing on. + // Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. 
KubeletExtraArgs map[string]string `json:"kubeletExtraArgs,omitempty"`
 }
 
diff --git a/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go b/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go
index 11336648121..650839e498a 100644
--- a/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go
+++ b/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go
@@ -162,7 +162,7 @@ type NodeRegistrationOptions struct {
 
 	// KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file
 	// kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap
-	// Flags have higher higher priority when parsing. These values are local and specific to the node kubeadm is executing on.
+	// Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on.
 	KubeletExtraArgs map[string]string `json:"kubeletExtraArgs,omitempty"`
 }
 
diff --git a/cmd/kubeadm/app/util/config/initconfiguration.go b/cmd/kubeadm/app/util/config/initconfiguration.go
index 8150961f5a4..4fbf1fdbfeb 100644
--- a/cmd/kubeadm/app/util/config/initconfiguration.go
+++ b/cmd/kubeadm/app/util/config/initconfiguration.go
@@ -118,7 +118,7 @@ func SetAPIEndpointDynamicDefaults(cfg *kubeadmapi.APIEndpoint) error {
 	return nil
 }
 
-// SetClusterDynamicDefaults checks and sets configuration values for the InitConfiguration object
+// SetClusterDynamicDefaults checks and sets values for the ClusterConfiguration object
 func SetClusterDynamicDefaults(cfg *kubeadmapi.ClusterConfiguration, advertiseAddress string, bindPort int32) error {
 	// Default all the embedded ComponentConfig structs
 	componentconfigs.Known.Default(cfg)

From ad0179897d49409989560b7aae89661c4f182375 Mon Sep 17 00:00:00 2001
From: Ed Bartosh
Date: Tue, 16 Oct 2018 18:01:53 +0300
Subject: [PATCH 04/19] kubeadm: skip upgrade if manifest is not changed

When doing upgrades kubeadm generates a new manifest and waits until
kubelet restarts the corresponding pod. However, kubelet won't restart
a pod if there are no changes in its manifest, which leaves kubeadm
stuck waiting for the pod to restart.

Skipping the upgrade when the new component manifest is the same as the
current one solves this.
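To make the mechanism concrete: the check is a plain byte-for-byte
comparison of the current and the newly generated manifest file. A
minimal, self-contained Go sketch of what the ManifestFilesAreEqual
helper introduced below does (an illustrative re-implementation, not
the patch code itself):

    package manifestcheck

    import (
        "bytes"
        "io/ioutil"
    )

    // manifestFilesAreEqual reads both static pod manifests and reports
    // whether their raw contents match. If they do, kubelet would not
    // restart the pod, so the upgrade of that component can be skipped.
    func manifestFilesAreEqual(path1, path2 string) (bool, error) {
        content1, err := ioutil.ReadFile(path1)
        if err != nil {
            return false, err
        }
        content2, err := ioutil.ReadFile(path2)
        if err != nil {
            return false, err
        }
        return bytes.Equal(content1, content2), nil
    }

Note that even a trailing newline makes two manifests unequal; the new
unit test below covers that case explicitly.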
Fixes: kubernetes/kubeadm#1054 --- cmd/kubeadm/app/phases/upgrade/BUILD | 1 + cmd/kubeadm/app/phases/upgrade/staticpods.go | 11 +++ cmd/kubeadm/app/util/staticpod/utils.go | 15 +++++ cmd/kubeadm/app/util/staticpod/utils_test.go | 71 ++++++++++++++++++++ 4 files changed, 98 insertions(+) diff --git a/cmd/kubeadm/app/phases/upgrade/BUILD b/cmd/kubeadm/app/phases/upgrade/BUILD index 3a4b859a9c6..c1accd0ecfc 100644 --- a/cmd/kubeadm/app/phases/upgrade/BUILD +++ b/cmd/kubeadm/app/phases/upgrade/BUILD @@ -37,6 +37,7 @@ go_library( "//cmd/kubeadm/app/util/apiclient:go_default_library", "//cmd/kubeadm/app/util/dryrun:go_default_library", "//cmd/kubeadm/app/util/etcd:go_default_library", + "//cmd/kubeadm/app/util/staticpod:go_default_library", "//pkg/version:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods.go b/cmd/kubeadm/app/phases/upgrade/staticpods.go index 94f18d778b8..cae7d7a1c7e 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/util" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd" + "k8s.io/kubernetes/cmd/kubeadm/app/util/staticpod" ) const ( @@ -201,6 +202,16 @@ func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticP // Store the backup path in the recover list. If something goes wrong now, this component will be rolled back. recoverManifests[component] = backupManifestPath + // Skip upgrade if current and new manifests are equal + equal, err := staticpod.ManifestFilesAreEqual(currentManifestPath, newManifestPath) + if err != nil { + return err + } + if equal { + fmt.Printf("[upgrade/staticpods] current and new manifests of %s are equal, skipping upgrade\n", component) + return nil + } + // Move the old manifest into the old-manifests directory if err := pathMgr.MoveFile(currentManifestPath, backupManifestPath); err != nil { return rollbackOldManifests(recoverManifests, err, pathMgr, recoverEtcd) diff --git a/cmd/kubeadm/app/util/staticpod/utils.go b/cmd/kubeadm/app/util/staticpod/utils.go index 8df649594af..6e2db42a458 100644 --- a/cmd/kubeadm/app/util/staticpod/utils.go +++ b/cmd/kubeadm/app/util/staticpod/utils.go @@ -17,6 +17,7 @@ limitations under the License. package staticpod import ( + "bytes" "fmt" "io/ioutil" "net" @@ -288,3 +289,17 @@ func GetProbeAddress(cfg *kubeadmapi.InitConfiguration, componentName string) st } return "127.0.0.1" } + +// ManifestFilesAreEqual compares 2 files. 
It returns true if their contents are equal, false otherwise
+func ManifestFilesAreEqual(path1, path2 string) (bool, error) {
+	content1, err := ioutil.ReadFile(path1)
+	if err != nil {
+		return false, err
+	}
+	content2, err := ioutil.ReadFile(path2)
+	if err != nil {
+		return false, err
+	}
+
+	return bytes.Equal(content1, content2), nil
+}
diff --git a/cmd/kubeadm/app/util/staticpod/utils_test.go b/cmd/kubeadm/app/util/staticpod/utils_test.go
index ea9a1657d4e..36878e0c11b 100644
--- a/cmd/kubeadm/app/util/staticpod/utils_test.go
+++ b/cmd/kubeadm/app/util/staticpod/utils_test.go
@@ -22,6 +22,7 @@ import (
 	"path/filepath"
 	"reflect"
 	"sort"
+	"strconv"
 	"testing"
 
 	"k8s.io/api/core/v1"
@@ -622,3 +623,73 @@ func TestReadStaticPodFromDisk(t *testing.T) {
 		}
 	}
 }
+
+func TestManifestFilesAreEqual(t *testing.T) {
+	var tests = []struct {
+		description    string
+		podYamls       []string
+		expectedResult bool
+		expectErr      bool
+	}{
+		{
+			description:    "manifests are equal",
+			podYamls:       []string{validPod, validPod},
+			expectedResult: true,
+			expectErr:      false,
+		},
+		{
+			description:    "manifests are not equal",
+			podYamls:       []string{validPod, validPod + "\n"},
+			expectedResult: false,
+			expectErr:      false,
+		},
+		{
+			description:    "first manifest doesn't exist",
+			podYamls:       []string{"", validPod},
+			expectedResult: false,
+			expectErr:      true,
+		},
+		{
+			description:    "second manifest doesn't exist",
+			podYamls:       []string{validPod, ""},
+			expectedResult: false,
+			expectErr:      true,
+		},
+	}
+
+	for _, rt := range tests {
+		tmpdir := testutil.SetupTempDir(t)
+		defer os.RemoveAll(tmpdir)
+
+		// write the two manifests; an empty entry means the file is not created
+		for i := 0; i < 2; i++ {
+			if rt.podYamls[i] != "" {
+				manifestPath := filepath.Join(tmpdir, strconv.Itoa(i)+".yaml")
+				err := ioutil.WriteFile(manifestPath, []byte(rt.podYamls[i]), 0644)
+				if err != nil {
+					t.Fatalf("Failed to write manifest file\n%s\n\tfatal error: %v", rt.description, err)
+				}
+			}
+		}
+
+		// compare them
+		result, actualErr := ManifestFilesAreEqual(filepath.Join(tmpdir, "0.yaml"), filepath.Join(tmpdir, "1.yaml"))
+		if result != rt.expectedResult {
+			t.Errorf(
+				"ManifestFilesAreEqual failed\n%s\nexpected result: %t\nactual result: %t",
+				rt.description,
+				rt.expectedResult,
+				result,
+			)
+		}
+		if (actualErr != nil) != rt.expectErr {
+			t.Errorf(
+				"ManifestFilesAreEqual failed\n%s\n\texpected error: %t\n\tgot: %t\n\tactual error: %v",
+				rt.description,
+				rt.expectErr,
+				(actualErr != nil),
+				actualErr,
+			)
+		}
+	}
+}

From abac950cd72101b073e05250b99e3e3c2d067ef8 Mon Sep 17 00:00:00 2001
From: fabriziopandini
Date: Thu, 18 Oct 2018 11:52:04 +0200
Subject: [PATCH 05/19] kubeadm graduate preflight phase

---
 cmd/kubeadm/app/cmd/init.go             | 119 ++++++++++++++++++++----
 cmd/kubeadm/app/cmd/phases/preflight.go |  94 ++++++++++---------
 2 files changed, 154 insertions(+), 59 deletions(-)

diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go
index aa9047fa605..286a64533ce 100644
--- a/cmd/kubeadm/app/cmd/init.go
+++ b/cmd/kubeadm/app/cmd/init.go
@@ -37,6 +37,7 @@ import (
 	kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
 	"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
 	"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
+	"k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases"
 	"k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow"
 	cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
 	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
@@ -126,6 +127,9 @@ type initData struct {
 	skipTokenPrint        bool
 	dryRun                bool
 	ignorePreflightErrors sets.String
+	certificatesDir       string
+	dryRunDir             string
+	client                clientset.Interface
 }
 
 // NewCmdInit returns "kubeadm init" command.
@@ -160,7 +164,8 @@ func NewCmdInit(out io.Writer) *cobra.Command {
 	options.bto.AddTTLFlag(cmd.PersistentFlags())
 
 	// initialize the workflow runner with the list of phases
-	// TODO: add the phases to the runner. e.g. initRunner.AppendPhase(phases.PreflightMaster)
+	initRunner.AppendPhase(phases.NewPreflightMasterPhase())
+	// TODO: add other phases to the runner.
 
 	// sets the data builder function, that will be used by the runner
 	// both when running the entire workflow or single phases
@@ -297,31 +302,113 @@ func newInitData(cmd *cobra.Command, options *initOptions) (initData, error) {
 		return initData{}, err
 	}
 
+	// If we're dry-running, create a temporary folder for saving kubeadm generated files
+	dryRunDir := ""
+	if options.dryRun {
+		if dryRunDir, err = ioutil.TempDir("", "kubeadm-init-dryrun"); err != nil {
+			return initData{}, fmt.Errorf("couldn't create a temporary directory: %v", err)
+		}
+	}
+
 	return initData{
 		cfg:                   cfg,
+		certificatesDir:       cfg.CertificatesDir,
 		skipTokenPrint:        options.skipTokenPrint,
 		dryRun:                options.dryRun,
+		dryRunDir:             dryRunDir,
 		ignorePreflightErrors: ignorePreflightErrorsSet,
 	}, nil
 }
 
+// Cfg returns the InitConfiguration.
+func (d initData) Cfg() *kubeadmapi.InitConfiguration {
+	return d.cfg
+}
+
+// DryRun returns the DryRun flag.
+func (d initData) DryRun() bool {
+	return d.dryRun
+}
+
+// SkipTokenPrint returns the SkipTokenPrint flag.
+func (d initData) SkipTokenPrint() bool {
+	return d.skipTokenPrint
+}
+
+// IgnorePreflightErrors returns the IgnorePreflightErrors flag.
+func (d initData) IgnorePreflightErrors() sets.String {
+	return d.ignorePreflightErrors
+}
+
+// CertificateWriteDir returns the path to the certificate folder or the temporary folder path in case of DryRun.
+func (d initData) CertificateWriteDir() string {
+	if d.dryRun {
+		return d.dryRunDir
+	}
+	return d.certificatesDir
+}
+
+// CertificateDir returns the CertificateDir as originally specified by the user.
+func (d initData) CertificateDir() string {
+	return d.certificatesDir
+}
+
+// KubeConfigDir returns the path of the kubernetes configuration folder or the temporary folder path in case of DryRun.
+func (d initData) KubeConfigDir() string {
+	if d.dryRun {
+		return d.dryRunDir
+	}
+	return kubeadmconstants.KubernetesDir
+}
+
+// ManifestDir returns the path where manifests should be stored or the temporary folder path in case of DryRun.
+func (d initData) ManifestDir() string {
+	if d.dryRun {
+		return d.dryRunDir
+	}
+	return kubeadmconstants.GetStaticPodDirectory()
+}
+
+// KubeletDir returns the path of the kubelet configuration folder or the temporary folder in case of DryRun.
+func (d initData) KubeletDir() string {
+	if d.dryRun {
+		return d.dryRunDir
+	}
+	return kubeadmconstants.KubeletRunDirectory
+}
+
+// Client returns a Kubernetes client to be used by kubeadm.
+// The client is created lazily and cached, thus avoiding recreating it when it is used by different phases.
+// Important: this function must be called after the admin.conf kubeconfig file is created.
+func (d initData) Client() (clientset.Interface, error) {
+	if d.client == nil {
+		if d.dryRun {
+			// If we're dry-running, we should create a fake client that answers some GETs in order to be able to do the full init flow, and just logs the rest of the requests
+			dryRunGetter := apiclient.NewInitDryRunGetter(d.cfg.NodeRegistration.Name, d.cfg.Networking.ServiceSubnet)
+			d.client = apiclient.NewDryRunClient(dryRunGetter, os.Stdout)
+		} else {
+			// If we're acting for real, we should create a connection to the API server and wait for it to come up
+			var err error
+			d.client, err = kubeconfigutil.ClientSetFromFile(kubeadmconstants.GetAdminKubeConfigPath())
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return d.client, nil
+}
+
+// Tokens returns an array of token strings.
+func (d initData) Tokens() []string {
+	tokens := []string{}
+	for _, bt := range d.cfg.BootstrapTokens {
+		tokens = append(tokens, bt.Token.String())
+	}
+	return tokens
+}
+
 // runInit executes master node provisioning
 func runInit(i *initData, out io.Writer) error {
-	fmt.Println("[preflight] running pre-flight checks")
-	if err := preflight.RunInitMasterChecks(utilsexec.New(), i.cfg, i.ignorePreflightErrors); err != nil {
-		return err
-	}
-
-	if !i.dryRun {
-		fmt.Println("[preflight/images] Pulling images required for setting up a Kubernetes cluster")
-		fmt.Println("[preflight/images] This might take a minute or two, depending on the speed of your internet connection")
-		fmt.Println("[preflight/images] You can also perform this action in beforehand using 'kubeadm config images pull'")
-		if err := preflight.RunPullImagesCheck(utilsexec.New(), i.cfg, i.ignorePreflightErrors); err != nil {
-			return err
-		}
-	} else {
-		fmt.Println("[preflight/images] Would pull the required images (like 'kubeadm config images pull')")
-	}
 
 	// Get directories to write files to; can be faked if we're dry-running
 	glog.V(1).Infof("[init] Getting certificates directory from configuration")
diff --git a/cmd/kubeadm/app/cmd/phases/preflight.go b/cmd/kubeadm/app/cmd/phases/preflight.go
index 662499b8e33..ac55b9a3796 100644
--- a/cmd/kubeadm/app/cmd/phases/preflight.go
+++ b/cmd/kubeadm/app/cmd/phases/preflight.go
@@ -21,11 +21,13 @@ import (
 	"fmt"
 
 	"github.com/spf13/cobra"
-
+	"k8s.io/apimachinery/pkg/util/sets"
+	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
 	kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
 	kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
 	"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
 	"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
+	"k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow"
 	cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
 	"k8s.io/kubernetes/cmd/kubeadm/app/preflight"
 	kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
@@ -35,13 +37,9 @@ import (
 )
 
 var (
-	masterPreflightLongDesc = normalizer.LongDesc(`
-	Run master pre-flight checks, functionally equivalent to what implemented by kubeadm init.
-	` + cmdutil.AlphaDisclaimer)
-
 	masterPreflightExample = normalizer.Examples(`
-		# Run master pre-flight checks.
-		kubeadm alpha phase preflight master
+		# Run master pre-flight checks using a config file.
+		kubeadm init phase preflight --config kubeadm-config.yml
 		`)
 
 	nodePreflightLongDesc = normalizer.LongDesc(`
@@ -56,6 +54,52 @@ var (
 	errorMissingConfigFlag = errors.New("the --config flag is mandatory")
 )
 
+// preflightMasterData defines the behavior that a runtime data struct passed to the PreflightMaster master phase
+// should have. Please note that we are using an interface in order to make this phase reusable in different workflows
+// (and thus with different runtime data structs, all of them required to be compliant with this interface)
+type preflightMasterData interface {
+	Cfg() *kubeadmapi.InitConfiguration
+	DryRun() bool
+	IgnorePreflightErrors() sets.String
+}
+
+// NewPreflightMasterPhase creates a kubeadm workflow phase that implements preflight checks for a new master node.
+func NewPreflightMasterPhase() workflow.Phase {
+	return workflow.Phase{
+		Name:    "preflight",
+		Short:   "Run master pre-flight checks",
+		Long:    "Run master pre-flight checks, functionally equivalent to what is implemented by kubeadm init.",
+		Example: masterPreflightExample,
+		Run:     runPreflightMaster,
+	}
+}
+
+// runPreflightMaster executes preflight checks logic.
+func runPreflightMaster(c workflow.RunData) error {
+	data, ok := c.(preflightMasterData)
+	if !ok {
+		return fmt.Errorf("preflight phase invoked with an invalid data struct")
+	}
+
+	fmt.Println("[preflight] running pre-flight checks")
+	if err := preflight.RunInitMasterChecks(utilsexec.New(), data.Cfg(), data.IgnorePreflightErrors()); err != nil {
+		return err
+	}
+
+	if !data.DryRun() {
+		fmt.Println("[preflight] Pulling images required for setting up a Kubernetes cluster")
+		fmt.Println("[preflight] This might take a minute or two, depending on the speed of your internet connection")
+		fmt.Println("[preflight] You can also perform this action beforehand using 'kubeadm config images pull'")
+		if err := preflight.RunPullImagesCheck(utilsexec.New(), data.Cfg(), data.IgnorePreflightErrors()); err != nil {
+			return err
+		}
+	} else {
+		fmt.Println("[preflight] Would pull the required images (like 'kubeadm config images pull')")
+	}
+
+	return nil
+}
+
 // NewCmdPreFlight calls cobra.Command for preflight checks
 func NewCmdPreFlight() *cobra.Command {
 	var cfgPath string
@@ -70,47 +114,11 @@ func NewCmdPreFlight() *cobra.Command {
 	options.AddConfigFlag(cmd.PersistentFlags(), &cfgPath)
 	options.AddIgnorePreflightErrorsFlag(cmd.PersistentFlags(), &ignorePreflightErrors)
 
-	cmd.AddCommand(NewCmdPreFlightMaster(&cfgPath, &ignorePreflightErrors))
 	cmd.AddCommand(NewCmdPreFlightNode(&cfgPath, &ignorePreflightErrors))
 
 	return cmd
 }
 
-// NewCmdPreFlightMaster calls cobra.Command for master preflight checks
-func NewCmdPreFlightMaster(cfgPath *string, ignorePreflightErrors *[]string) *cobra.Command {
-
-	cmd := &cobra.Command{
-		Use:     "master",
-		Short:   "Run master pre-flight checks",
-		Long:    masterPreflightLongDesc,
-		Example: masterPreflightExample,
-		Run: func(cmd *cobra.Command, args []string) {
-			if len(*cfgPath) == 0 {
-				kubeadmutil.CheckErr(errorMissingConfigFlag)
-			}
-			ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(*ignorePreflightErrors)
-			kubeadmutil.CheckErr(err)
-
-			cfg := &kubeadmapiv1beta1.InitConfiguration{}
-			kubeadmscheme.Scheme.Default(cfg)
-
-			internalcfg, err := configutil.ConfigFileAndDefaultsToInternalConfig(*cfgPath, cfg)
-			kubeadmutil.CheckErr(err)
-			err = configutil.VerifyAPIServerBindAddress(internalcfg.APIEndpoint.AdvertiseAddress)
-			kubeadmutil.CheckErr(err)
-
-			fmt.Println("[preflight] running pre-flight checks")
-
-			err = preflight.RunInitMasterChecks(utilsexec.New(), internalcfg, ignorePreflightErrorsSet)
-			kubeadmutil.CheckErr(err)
-
-			fmt.Println("[preflight] pre-flight checks passed")
-		},
-	}
-
-	return cmd
-}
-
 // NewCmdPreFlightNode calls cobra.Command for node preflight checks
 func NewCmdPreFlightNode(cfgPath *string,
ignorePreflightErrors *[]string) *cobra.Command { cmd := &cobra.Command{ From 22da6a66a22e8a2c017cbdca6211c6b6439bd023 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Thu, 18 Oct 2018 11:52:15 +0200 Subject: [PATCH 06/19] autogenerated --- cmd/kubeadm/app/cmd/phases/BUILD | 2 ++ docs/.generated_docs | 6 ++++-- ...lpha_phase_preflight_master.md => kubeadm_init_phase.md} | 0 .../kubeadm_init_phase_preflight.md} | 0 docs/man/man1/kubeadm-init-phase-preflight.1 | 3 +++ docs/man/man1/kubeadm-init-phase.1 | 3 +++ 6 files changed, 12 insertions(+), 2 deletions(-) rename docs/admin/{kubeadm_alpha_phase_preflight_master.md => kubeadm_init_phase.md} (100%) rename docs/{man/man1/kubeadm-alpha-phase-preflight-master.1 => admin/kubeadm_init_phase_preflight.md} (100%) create mode 100644 docs/man/man1/kubeadm-init-phase-preflight.1 create mode 100644 docs/man/man1/kubeadm-init-phase.1 diff --git a/cmd/kubeadm/app/cmd/phases/BUILD b/cmd/kubeadm/app/cmd/phases/BUILD index bed47649f9f..24def1c2889 100644 --- a/cmd/kubeadm/app/cmd/phases/BUILD +++ b/cmd/kubeadm/app/cmd/phases/BUILD @@ -26,6 +26,7 @@ go_library( "//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library", "//cmd/kubeadm/app/cmd/options:go_default_library", "//cmd/kubeadm/app/cmd/phases/certs:go_default_library", + "//cmd/kubeadm/app/cmd/phases/workflow:go_default_library", "//cmd/kubeadm/app/cmd/util:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/features:go_default_library", @@ -50,6 +51,7 @@ go_library( "//pkg/util/normalizer:go_default_library", "//pkg/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", diff --git a/docs/.generated_docs b/docs/.generated_docs index 294c91fd39f..e67c3285186 100644 --- a/docs/.generated_docs +++ b/docs/.generated_docs @@ -64,7 +64,6 @@ docs/admin/kubeadm_alpha_phase_kubelet_config_write-to-disk.md docs/admin/kubeadm_alpha_phase_kubelet_write-env-file.md docs/admin/kubeadm_alpha_phase_mark-master.md docs/admin/kubeadm_alpha_phase_preflight.md -docs/admin/kubeadm_alpha_phase_preflight_master.md docs/admin/kubeadm_alpha_phase_preflight_node.md docs/admin/kubeadm_alpha_phase_selfhosting.md docs/admin/kubeadm_alpha_phase_selfhosting_convert-from-staticpods.md @@ -81,6 +80,8 @@ docs/admin/kubeadm_config_upload_from-file.md docs/admin/kubeadm_config_upload_from-flags.md docs/admin/kubeadm_config_view.md docs/admin/kubeadm_init.md +docs/admin/kubeadm_init_phase.md +docs/admin/kubeadm_init_phase_preflight.md docs/admin/kubeadm_join.md docs/admin/kubeadm_reset.md docs/admin/kubeadm_token.md @@ -158,7 +159,6 @@ docs/man/man1/kubeadm-alpha-phase-kubelet-config.1 docs/man/man1/kubeadm-alpha-phase-kubelet-write-env-file.1 docs/man/man1/kubeadm-alpha-phase-kubelet.1 docs/man/man1/kubeadm-alpha-phase-mark-master.1 -docs/man/man1/kubeadm-alpha-phase-preflight-master.1 docs/man/man1/kubeadm-alpha-phase-preflight-node.1 docs/man/man1/kubeadm-alpha-phase-preflight.1 docs/man/man1/kubeadm-alpha-phase-selfhosting-convert-from-staticpods.1 @@ -177,6 +177,8 @@ docs/man/man1/kubeadm-config-upload-from-flags.1 docs/man/man1/kubeadm-config-upload.1 docs/man/man1/kubeadm-config-view.1 docs/man/man1/kubeadm-config.1 +docs/man/man1/kubeadm-init-phase-preflight.1 
+docs/man/man1/kubeadm-init-phase.1 docs/man/man1/kubeadm-init.1 docs/man/man1/kubeadm-join.1 docs/man/man1/kubeadm-reset.1 diff --git a/docs/admin/kubeadm_alpha_phase_preflight_master.md b/docs/admin/kubeadm_init_phase.md similarity index 100% rename from docs/admin/kubeadm_alpha_phase_preflight_master.md rename to docs/admin/kubeadm_init_phase.md diff --git a/docs/man/man1/kubeadm-alpha-phase-preflight-master.1 b/docs/admin/kubeadm_init_phase_preflight.md similarity index 100% rename from docs/man/man1/kubeadm-alpha-phase-preflight-master.1 rename to docs/admin/kubeadm_init_phase_preflight.md diff --git a/docs/man/man1/kubeadm-init-phase-preflight.1 b/docs/man/man1/kubeadm-init-phase-preflight.1 new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/man/man1/kubeadm-init-phase-preflight.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/man/man1/kubeadm-init-phase.1 b/docs/man/man1/kubeadm-init-phase.1 new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/man/man1/kubeadm-init-phase.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. From 91049ef77b1d9c37b87f9e0e702e8b60a5bfd2a2 Mon Sep 17 00:00:00 2001 From: "Rostislav M. Georgiev" Date: Thu, 4 Oct 2018 13:02:56 +0300 Subject: [PATCH 07/19] kubeadm: Introduce config print init/join-defaults In order to improve the UX of kubeadm, it was decided to introduce the following subcommands: - `kubeadm config print` - this is currently only a placeholder for subcommands that deal with printing of some kind of configuration. - `kubeadm config print init-defaults` - prints the default combination of InitConfiguration and ClusterConfiguration. Selected component configs can be printed too if the `--component-configs` command line switch is used. - `kubeadm config print join-defaults` - prints the default JoinConfiguration. This command also supports the use of `--component-configs`. - `kubeadm config print-defaults` is deprecated in favor of `kubeadm config print init/join-defaults`. Signed-off-by: Rostislav M.
Georgiev --- cmd/kubeadm/app/cmd/BUILD | 2 + cmd/kubeadm/app/cmd/config.go | 106 +++++++++++++++--- cmd/kubeadm/app/cmd/config_test.go | 100 +++++++++++++++++ docs/.generated_docs | 7 +- ...int-default.md => kubeadm_config_print.md} | 0 .../kubeadm_config_print_init-defaults.md | 3 + .../kubeadm_config_print_join-defaults.md | 3 + .../man1/kubeadm-config-print-init-defaults.1 | 3 + .../man1/kubeadm-config-print-join-defaults.1 | 3 + docs/man/man1/kubeadm-config-print.1 | 3 + 10 files changed, 215 insertions(+), 15 deletions(-) rename docs/admin/{kubeadm_config_print-default.md => kubeadm_config_print.md} (100%) create mode 100644 docs/admin/kubeadm_config_print_init-defaults.md create mode 100644 docs/admin/kubeadm_config_print_join-defaults.md create mode 100644 docs/man/man1/kubeadm-config-print-init-defaults.1 create mode 100644 docs/man/man1/kubeadm-config-print-join-defaults.1 create mode 100644 docs/man/man1/kubeadm-config-print.1 diff --git a/cmd/kubeadm/app/cmd/BUILD b/cmd/kubeadm/app/cmd/BUILD index 1900bc92803..6127f5fc159 100644 --- a/cmd/kubeadm/app/cmd/BUILD +++ b/cmd/kubeadm/app/cmd/BUILD @@ -95,9 +95,11 @@ go_test( "//cmd/kubeadm/app/apis/kubeadm/scheme:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/v1beta1:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library", + "//cmd/kubeadm/app/componentconfigs:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/features:go_default_library", "//cmd/kubeadm/app/preflight:go_default_library", + "//cmd/kubeadm/app/util:go_default_library", "//cmd/kubeadm/app/util/config:go_default_library", "//cmd/kubeadm/app/util/runtime:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/cmd/kubeadm/app/cmd/config.go b/cmd/kubeadm/app/cmd/config.go index 0f5bea4b5fd..c8cb8a80473 100644 --- a/cmd/kubeadm/app/cmd/config.go +++ b/cmd/kubeadm/app/cmd/config.go @@ -50,8 +50,8 @@ import ( ) var ( - // sillyToken is only set statically to make kubeadm not randomize the token on every run - sillyToken = kubeadmapiv1beta1.BootstrapToken{ + // placeholderToken is only set statically to make kubeadm not randomize the token on every run + placeholderToken = kubeadmapiv1beta1.BootstrapToken{ Token: &kubeadmapiv1beta1.BootstrapTokenString{ ID: "abcdef", Secret: "0123456789abcdef", @@ -83,6 +83,7 @@ func NewCmdConfig(out io.Writer) *cobra.Command { options.AddKubeConfigFlag(cmd.PersistentFlags(), &kubeConfigFile) kubeConfigFile = cmdutil.FindExistingKubeConfig(kubeConfigFile) + cmd.AddCommand(NewCmdConfigPrint(out)) cmd.AddCommand(NewCmdConfigPrintDefault(out)) cmd.AddCommand(NewCmdConfigMigrate(out)) cmd.AddCommand(NewCmdConfigUpload(out, &kubeConfigFile)) @@ -91,6 +92,63 @@ func NewCmdConfig(out io.Writer) *cobra.Command { return cmd } +// NewCmdConfigPrint returns cobra.Command for "kubeadm config print" command +func NewCmdConfigPrint(out io.Writer) *cobra.Command { + cmd := &cobra.Command{ + Use: "print", + Short: "Print configuration", + Long: "This command prints configurations for subcommands provided.", + RunE: cmdutil.SubCmdRunE("print"), + } + cmd.AddCommand(NewCmdConfigPrintInitDefaults(out)) + cmd.AddCommand(NewCmdConfigPrintJoinDefaults(out)) + return cmd +} + +// NewCmdConfigPrintInitDefaults returns cobra.Command for "kubeadm config print init-defaults" command +func NewCmdConfigPrintInitDefaults(out io.Writer) *cobra.Command { + return newCmdConfigPrintActionDefaults(out, "init", getDefaultInitConfigBytes) +} + +// NewCmdConfigPrintJoinDefaults 
returns cobra.Command for "kubeadm config print join-defaults" command +func NewCmdConfigPrintJoinDefaults(out io.Writer) *cobra.Command { + return newCmdConfigPrintActionDefaults(out, "join", getDefaultNodeConfigBytes) +} + +func newCmdConfigPrintActionDefaults(out io.Writer, action string, configBytesProc func() ([]byte, error)) *cobra.Command { + componentConfigs := []string{} + cmd := &cobra.Command{ + Use: fmt.Sprintf("%s-defaults", action), + Short: fmt.Sprintf("Print default %s configuration, which can be used for 'kubeadm %s'", action, action), + Long: fmt.Sprintf(dedent.Dedent(` + This command prints objects such as the default %s configuration that is used for 'kubeadm %s'. + + Note that sensitive values like the Bootstrap Token fields are replaced with placeholder values like %q in order to pass validation but + not perform the real computation for creating a token. + `), action, action, placeholderToken), + Run: func(cmd *cobra.Command, args []string) { + runConfigPrintActionDefaults(out, componentConfigs, configBytesProc) + }, + } + cmd.Flags().StringSliceVar(&componentConfigs, "component-configs", componentConfigs, + fmt.Sprintf("A comma-separated list of component config API objects to print the default values for. Available values: %v. If this flag is not set, no component configs will be printed.", getSupportedComponentConfigAPIObjects())) + return cmd +} + +func runConfigPrintActionDefaults(out io.Writer, componentConfigs []string, configBytesProc func() ([]byte, error)) { + initialConfig, err := configBytesProc() + kubeadmutil.CheckErr(err) + + allBytes := [][]byte{initialConfig} + for _, componentConfig := range componentConfigs { + cfgBytes, err := getDefaultComponentConfigAPIObjectBytes(componentConfig) + kubeadmutil.CheckErr(err) + allBytes = append(allBytes, cfgBytes) + } + + fmt.Fprint(out, string(bytes.Join(allBytes, []byte(constants.YAMLDocumentSeparator)))) +} + // NewCmdConfigPrintDefault returns cobra.Command for "kubeadm config print-default" command func NewCmdConfigPrintDefault(out io.Writer) *cobra.Command { apiObjects := []string{} @@ -104,9 +162,10 @@ func NewCmdConfigPrintDefault(out io.Writer) *cobra.Command { For documentation visit: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha3 - Note that sensitive values like the Bootstrap Token fields are replaced with silly values like %q in order to pass validation but + Note that sensitive values like the Bootstrap Token fields are replaced with placeholder values like %q in order to pass validation but not perform the real computation for creating a token.
- `), sillyToken), + `), placeholderToken), + Deprecated: "Please use `kubeadm config print` instead.", Run: func(cmd *cobra.Command, args []string) { if len(apiObjects) == 0 { apiObjects = getSupportedAPIObjects() @@ -125,13 +184,21 @@ func NewCmdConfigPrintDefault(out io.Writer) *cobra.Command { return cmd } +func getDefaultComponentConfigAPIObjectBytes(apiObject string) ([]byte, error) { + registration, ok := componentconfigs.Known[componentconfigs.RegistrationKind(apiObject)] + if !ok { + return []byte{}, fmt.Errorf("--component-configs needs to contain some of %v", getSupportedComponentConfigAPIObjects()) + } + return getDefaultComponentConfigBytes(registration) +} + func getDefaultAPIObjectBytes(apiObject string) ([]byte, error) { switch apiObject { case constants.InitConfigurationKind: - return getDefaultInitConfigBytes(constants.InitConfigurationKind) + return getDefaultInitConfigBytesByKind(constants.InitConfigurationKind) case constants.ClusterConfigurationKind: - return getDefaultInitConfigBytes(constants.ClusterConfigurationKind) + return getDefaultInitConfigBytesByKind(constants.ClusterConfigurationKind) case constants.JoinConfigurationKind: return getDefaultNodeConfigBytes() @@ -146,15 +213,23 @@ func getDefaultAPIObjectBytes(apiObject string) ([]byte, error) { } } -// getSupportedAPIObjects returns all currently supported API object names -func getSupportedAPIObjects() []string { - objects := []string{constants.InitConfigurationKind, constants.ClusterConfigurationKind, constants.JoinConfigurationKind} +// getSupportedComponentConfigAPIObjects returns all currently supported component config API object names +func getSupportedComponentConfigAPIObjects() []string { + objects := []string{} for componentType := range componentconfigs.Known { objects = append(objects, string(componentType)) } return objects } +// getSupportedAPIObjects returns all currently supported API object names +func getSupportedAPIObjects() []string { + baseObjects := []string{constants.InitConfigurationKind, constants.ClusterConfigurationKind, constants.JoinConfigurationKind} + objects := getSupportedComponentConfigAPIObjects() + objects = append(objects, baseObjects...) + return objects +} + // getAllAPIObjectNames returns currently supported API object names and their historical aliases // NB.
currently there are no historically supported API objects, but we keep this function for future changes func getAllAPIObjectNames() []string { @@ -171,18 +246,21 @@ func getDefaultedInitConfig() (*kubeadmapi.InitConfiguration, error) { ClusterConfiguration: kubeadmapiv1beta1.ClusterConfiguration{ KubernetesVersion: fmt.Sprintf("v1.%d.0", constants.MinimumControlPlaneVersion.Minor()+1), }, - BootstrapTokens: []kubeadmapiv1beta1.BootstrapToken{sillyToken}, + BootstrapTokens: []kubeadmapiv1beta1.BootstrapToken{placeholderToken}, }) } -// TODO: This is now invoked for both InitConfiguration and ClusterConfiguration, we should make separate versions of it -func getDefaultInitConfigBytes(kind string) ([]byte, error) { +func getDefaultInitConfigBytes() ([]byte, error) { internalcfg, err := getDefaultedInitConfig() if err != nil { return []byte{}, err } - b, err := configutil.MarshalKubeadmConfigObject(internalcfg) + return configutil.MarshalKubeadmConfigObject(internalcfg) +} + +func getDefaultInitConfigBytesByKind(kind string) ([]byte, error) { + b, err := getDefaultInitConfigBytes() if err != nil { return []byte{}, err } @@ -197,7 +275,7 @@ func getDefaultNodeConfigBytes() ([]byte, error) { internalcfg, err := configutil.JoinConfigFileAndDefaultsToInternalConfig("", &kubeadmapiv1beta1.JoinConfiguration{ Discovery: kubeadmapiv1beta1.Discovery{ BootstrapToken: &kubeadmapiv1beta1.BootstrapTokenDiscovery{ - Token: sillyToken.Token.String(), + Token: placeholderToken.Token.String(), APIServerEndpoints: []string{"kube-apiserver:6443"}, UnsafeSkipCAVerification: true, // TODO: UnsafeSkipCAVerification: true needs to be set for validation to pass, but shouldn't be recommended as the default }, diff --git a/cmd/kubeadm/app/cmd/config_test.go b/cmd/kubeadm/app/cmd/config_test.go index 452b42d3d7c..7af79bc3bef 100644 --- a/cmd/kubeadm/app/cmd/config_test.go +++ b/cmd/kubeadm/app/cmd/config_test.go @@ -18,17 +18,24 @@ package cmd_test import ( "bytes" + "io" "io/ioutil" "os" "path/filepath" + "reflect" + "sort" "strings" "testing" "github.com/renstrom/dedent" + "github.com/spf13/cobra" kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" "k8s.io/kubernetes/cmd/kubeadm/app/cmd" + "k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/features" + kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" utilruntime "k8s.io/kubernetes/cmd/kubeadm/app/util/runtime" "k8s.io/utils/exec" @@ -272,3 +279,96 @@ func tempConfig(t *testing.T, config []byte) (string, func()) { os.RemoveAll(tmpDir) } } + +func TestNewCmdConfigPrintActionDefaults(t *testing.T) { + tests := []struct { + name string + expectedKinds []string // need to be sorted + componentConfigs string + cmdProc func(out io.Writer) *cobra.Command + }{ + { + name: "InitConfiguration: No component configs", + expectedKinds: []string{ + constants.ClusterConfigurationKind, + constants.InitConfigurationKind, + }, + cmdProc: cmd.NewCmdConfigPrintInitDefaults, + }, + { + name: "InitConfiguration: KubeProxyConfiguration", + expectedKinds: []string{ + constants.ClusterConfigurationKind, + constants.InitConfigurationKind, + string(componentconfigs.KubeProxyConfigurationKind), + }, + componentConfigs: "KubeProxyConfiguration", + cmdProc: cmd.NewCmdConfigPrintInitDefaults, + }, + { + name: "InitConfiguration: KubeProxyConfiguration and KubeletConfiguration", + expectedKinds: []string{
constants.ClusterConfigurationKind, + constants.InitConfigurationKind, + string(componentconfigs.KubeProxyConfigurationKind), + string(componentconfigs.KubeletConfigurationKind), + }, + componentConfigs: "KubeProxyConfiguration,KubeletConfiguration", + cmdProc: cmd.NewCmdConfigPrintInitDefaults, + }, + { + name: "JoinConfiguration: No component configs", + expectedKinds: []string{ + constants.JoinConfigurationKind, + }, + cmdProc: cmd.NewCmdConfigPrintJoinDefaults, + }, + { + name: "JoinConfiguration: KubeProxyConfiguration", + expectedKinds: []string{ + constants.JoinConfigurationKind, + string(componentconfigs.KubeProxyConfigurationKind), + }, + componentConfigs: "KubeProxyConfiguration", + cmdProc: cmd.NewCmdConfigPrintJoinDefaults, + }, + { + name: "JoinConfiguration: KubeProxyConfiguration and KubeletConfiguration", + expectedKinds: []string{ + constants.JoinConfigurationKind, + string(componentconfigs.KubeProxyConfigurationKind), + string(componentconfigs.KubeletConfigurationKind), + }, + componentConfigs: "KubeProxyConfiguration,KubeletConfiguration", + cmdProc: cmd.NewCmdConfigPrintJoinDefaults, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var output bytes.Buffer + + command := test.cmdProc(&output) + if err := command.Flags().Set("component-configs", test.componentConfigs); err != nil { + t.Fatalf("failed to set component-configs flag") + } + command.Run(nil, nil) + + gvkmap, err := kubeadmutil.SplitYAMLDocuments(output.Bytes()) + if err != nil { + t.Fatalf("unexpected failure of SplitYAMLDocuments: %v", err) + } + + gotKinds := []string{} + for gvk := range gvkmap { + gotKinds = append(gotKinds, gvk.Kind) + } + + sort.Strings(gotKinds) + + if !reflect.DeepEqual(gotKinds, test.expectedKinds) { + t.Fatalf("kinds not matching:\n\texpectedKinds: %v\n\tgotKinds: %v\n", test.expectedKinds, gotKinds) + } + }) + } +} diff --git a/docs/.generated_docs b/docs/.generated_docs index 294c91fd39f..9035112ac2b 100644 --- a/docs/.generated_docs +++ b/docs/.generated_docs @@ -75,7 +75,9 @@ docs/admin/kubeadm_config_images.md docs/admin/kubeadm_config_images_list.md docs/admin/kubeadm_config_images_pull.md docs/admin/kubeadm_config_migrate.md -docs/admin/kubeadm_config_print-default.md +docs/admin/kubeadm_config_print.md +docs/admin/kubeadm_config_print_init-defaults.md +docs/admin/kubeadm_config_print_join-defaults.md docs/admin/kubeadm_config_upload.md docs/admin/kubeadm_config_upload_from-file.md docs/admin/kubeadm_config_upload_from-flags.md @@ -172,6 +174,9 @@ docs/man/man1/kubeadm-config-images-pull.1 docs/man/man1/kubeadm-config-images.1 docs/man/man1/kubeadm-config-migrate.1 docs/man/man1/kubeadm-config-print-default.1 +docs/man/man1/kubeadm-config-print-init-defaults.1 +docs/man/man1/kubeadm-config-print-join-defaults.1 +docs/man/man1/kubeadm-config-print.1 docs/man/man1/kubeadm-config-upload-from-file.1 docs/man/man1/kubeadm-config-upload-from-flags.1 docs/man/man1/kubeadm-config-upload.1 diff --git a/docs/admin/kubeadm_config_print-default.md b/docs/admin/kubeadm_config_print.md similarity index 100% rename from docs/admin/kubeadm_config_print-default.md rename to docs/admin/kubeadm_config_print.md diff --git a/docs/admin/kubeadm_config_print_init-defaults.md b/docs/admin/kubeadm_config_print_init-defaults.md new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/admin/kubeadm_config_print_init-defaults.md @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need 
for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/admin/kubeadm_config_print_join-defaults.md b/docs/admin/kubeadm_config_print_join-defaults.md new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/admin/kubeadm_config_print_join-defaults.md @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/man/man1/kubeadm-config-print-init-defaults.1 b/docs/man/man1/kubeadm-config-print-init-defaults.1 new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/man/man1/kubeadm-config-print-init-defaults.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/man/man1/kubeadm-config-print-join-defaults.1 b/docs/man/man1/kubeadm-config-print-join-defaults.1 new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/man/man1/kubeadm-config-print-join-defaults.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/man/man1/kubeadm-config-print.1 b/docs/man/man1/kubeadm-config-print.1 new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/man/man1/kubeadm-config-print.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. From 8c9cb0c71f1653d547bf35f50a92ac2f31fb42f1 Mon Sep 17 00:00:00 2001 From: "Lubomir I. Ivanov" Date: Thu, 18 Oct 2018 18:45:17 +0300 Subject: [PATCH 08/19] docs/admin: add OWNERS file with cmd/kubeadm approvers --- docs/admin/OWNERS | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 docs/admin/OWNERS diff --git a/docs/admin/OWNERS b/docs/admin/OWNERS new file mode 100644 index 00000000000..42ddcf79b6e --- /dev/null +++ b/docs/admin/OWNERS @@ -0,0 +1,10 @@ +approvers: +- luxas +- timothysc +- fabriziopandini +reviewers: +- luxas +- timothysc +- fabriziopandini +labels: +- kind/documentation From 8af042af6df85327a4d6efbb7439da4b917042e1 Mon Sep 17 00:00:00 2001 From: yuexiao-wang Date: Fri, 19 Oct 2018 19:30:32 +0800 Subject: [PATCH 09/19] fix reference to controlManagerExtraArgs Signed-off-by: yuexiao-wang --- cmd/kubeadm/app/apis/kubeadm/v1alpha3/doc.go | 2 +- cmd/kubeadm/app/apis/kubeadm/v1beta1/doc.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha3/doc.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha3/doc.go index b3c3c54af9f..6d6c2309c0b 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha3/doc.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha3/doc.go @@ -210,7 +210,7 @@ limitations under the License.
// controlPlaneEndpoint: "10.100.0.1:6443" // apiServerExtraArgs: // authorization-mode: "Node,RBAC" -// controlManagerExtraArgs: +// controllerManagerExtraArgs: // node-cidr-mask-size: 20 // schedulerExtraArgs: // address: "10.100.0.1" diff --git a/cmd/kubeadm/app/apis/kubeadm/v1beta1/doc.go b/cmd/kubeadm/app/apis/kubeadm/v1beta1/doc.go index 9a356757a57..4c3179424d4 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1beta1/doc.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1beta1/doc.go @@ -202,7 +202,7 @@ limitations under the License. // controlPlaneEndpoint: "10.100.0.1:6443" // apiServerExtraArgs: // authorization-mode: "Node,RBAC" -// controlManagerExtraArgs: +// controllerManagerExtraArgs: // node-cidr-mask-size: 20 // schedulerExtraArgs: // address: "10.100.0.1" From 41937c21c00fc90d5e486d186df28233d8f0004b Mon Sep 17 00:00:00 2001 From: SataQiu Date: Fri, 19 Oct 2018 22:20:25 +0800 Subject: [PATCH 10/19] clean up redundant code --- cmd/kubeadm/app/cmd/join.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cmd/kubeadm/app/cmd/join.go b/cmd/kubeadm/app/cmd/join.go index 0490c9b1728..817ebc58513 100644 --- a/cmd/kubeadm/app/cmd/join.go +++ b/cmd/kubeadm/app/cmd/join.go @@ -447,11 +447,6 @@ func (j *Join) CheckIfReadyForAdditionalControlPlane(initConfiguration *kubeadma // PrepareForHostingControlPlane makes all preparation activities require for a node hosting a new control plane instance func (j *Join) PrepareForHostingControlPlane(initConfiguration *kubeadmapi.InitConfiguration) error { - // Creates the admin kubeconfig file for the admin and for kubeadm itself. - if err := kubeconfigphase.CreateAdminKubeConfigFile(kubeadmconstants.KubernetesDir, initConfiguration); err != nil { - return errors.Wrap(err, "error generating the admin kubeconfig file") - } - // Generate missing certificates (if any) if err := certsphase.CreatePKIAssets(initConfiguration); err != nil { return err From 8906d1d8beb9335c438d3d093a064512b901e61c Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Thu, 18 Oct 2018 15:12:31 -0700 Subject: [PATCH 11/19] debian-base: purge libsystemd0 to eliminate CVE false-positives --- build/debian-base/Dockerfile.build | 1 + build/debian-base/Makefile | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/build/debian-base/Dockerfile.build b/build/debian-base/Dockerfile.build index 7b7b99d3a6a..cdd026b08f1 100644 --- a/build/debian-base/Dockerfile.build +++ b/build/debian-base/Dockerfile.build @@ -56,6 +56,7 @@ RUN echo "Yes, do as I say!" | apt-get purge \ libprocps6 \ libslang2 \ libss2 \ + libsystemd0 \ libtext-charwidth-perl libtext-iconv-perl libtext-wrapi18n-perl \ ncurses-base \ ncurses-bin \ diff --git a/build/debian-base/Makefile b/build/debian-base/Makefile index 379fa491962..e472e95b24f 100755 --- a/build/debian-base/Makefile +++ b/build/debian-base/Makefile @@ -18,7 +18,7 @@ REGISTRY ?= staging-k8s.gcr.io IMAGE ?= $(REGISTRY)/debian-base BUILD_IMAGE ?= debian-build -TAG ?= 0.3.2 +TAG ?= 0.4.0 TAR_FILE ?= rootfs.tar ARCH?=amd64 From 7a8696c331c055332924ff4b77af9eab25071319 Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Fri, 19 Oct 2018 08:49:08 -0700 Subject: [PATCH 12/19] Update to debian-base 0.4.0 Additionally, update the addon-manager to use kubectl v1.11.3. 
--- build/debian-hyperkube-base/Makefile | 4 ++-- build/debian-iptables/Makefile | 4 ++-- cluster/addons/addon-manager/CHANGELOG.md | 4 ++++ cluster/addons/addon-manager/Makefile | 6 +++--- test/e2e/common/runtime.go | 2 +- test/images/pets/peer-finder/BASEIMAGE | 8 ++++---- test/images/pets/peer-finder/VERSION | 2 +- test/images/pets/redis-installer/BASEIMAGE | 8 ++++---- test/images/pets/redis-installer/VERSION | 2 +- test/images/pets/zookeeper-installer/BASEIMAGE | 8 ++++---- test/images/pets/zookeeper-installer/VERSION | 2 +- test/images/resource-consumer/BASEIMAGE | 8 ++++---- test/images/resource-consumer/VERSION | 2 +- 13 files changed, 32 insertions(+), 28 deletions(-) diff --git a/build/debian-hyperkube-base/Makefile b/build/debian-hyperkube-base/Makefile index d9c0d87a00a..f402fe04a90 100644 --- a/build/debian-hyperkube-base/Makefile +++ b/build/debian-hyperkube-base/Makefile @@ -19,12 +19,12 @@ REGISTRY?=staging-k8s.gcr.io IMAGE?=$(REGISTRY)/debian-hyperkube-base -TAG=0.11.0 +TAG=0.12.0 ARCH?=amd64 ALL_ARCH = amd64 arm arm64 ppc64le s390x CACHEBUST?=1 -BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.3.2 +BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.4.0 CNI_VERSION=v0.6.0 TEMP_DIR:=$(shell mktemp -d) diff --git a/build/debian-iptables/Makefile b/build/debian-iptables/Makefile index 8c793db5aac..0d90adfcbb6 100644 --- a/build/debian-iptables/Makefile +++ b/build/debian-iptables/Makefile @@ -16,12 +16,12 @@ REGISTRY?="staging-k8s.gcr.io" IMAGE=$(REGISTRY)/debian-iptables -TAG?=v10.2 +TAG?=v11.0 ARCH?=amd64 ALL_ARCH = amd64 arm arm64 ppc64le s390x TEMP_DIR:=$(shell mktemp -d) -BASEIMAGE?=k8s.gcr.io/debian-base-$(ARCH):0.3.2 +BASEIMAGE?=k8s.gcr.io/debian-base-$(ARCH):0.4.0 # This option is for running docker manifest command export DOCKER_CLI_EXPERIMENTAL := enabled diff --git a/cluster/addons/addon-manager/CHANGELOG.md b/cluster/addons/addon-manager/CHANGELOG.md index b4ac954e49a..dc48eab6191 100644 --- a/cluster/addons/addon-manager/CHANGELOG.md +++ b/cluster/addons/addon-manager/CHANGELOG.md @@ -1,3 +1,7 @@ +### Version 8.9 (Fri October 19 2018 Jeff Grafton ) + - Update to use debian-base:0.4.0. + - Update kubectl to v1.11.3. + ### Version 8.8 (Mon October 1 2018 Zihong Zheng ) - Update to use debian-base:0.3.2. 
diff --git a/cluster/addons/addon-manager/Makefile b/cluster/addons/addon-manager/Makefile index 7297f048282..b79ea4b444c 100644 --- a/cluster/addons/addon-manager/Makefile +++ b/cluster/addons/addon-manager/Makefile @@ -15,10 +15,10 @@ IMAGE=staging-k8s.gcr.io/kube-addon-manager ARCH?=amd64 TEMP_DIR:=$(shell mktemp -d) -VERSION=v8.8 -KUBECTL_VERSION?=v1.10.7 +VERSION=v8.9 +KUBECTL_VERSION?=v1.11.3 -BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.3.2 +BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.4.0 .PHONY: build push diff --git a/test/e2e/common/runtime.go b/test/e2e/common/runtime.go index bd55a305632..69b7bbf82f2 100644 --- a/test/e2e/common/runtime.go +++ b/test/e2e/common/runtime.go @@ -276,7 +276,7 @@ while true; do sleep 1; done }, { description: "should be able to pull image from gcr.io", - image: "gcr.io/google-containers/debian-base:0.3.2", + image: "gcr.io/google-containers/debian-base:0.4.0", phase: v1.PodRunning, waiting: false, }, diff --git a/test/images/pets/peer-finder/BASEIMAGE b/test/images/pets/peer-finder/BASEIMAGE index 67f1b591b38..975e946af60 100644 --- a/test/images/pets/peer-finder/BASEIMAGE +++ b/test/images/pets/peer-finder/BASEIMAGE @@ -1,4 +1,4 @@ -amd64=k8s.gcr.io/debian-base-amd64:0.3.2 -arm=k8s.gcr.io/debian-base-arm:0.3.2 -arm64=k8s.gcr.io/debian-base-arm64:0.3.2 -ppc64le=k8s.gcr.io/debian-base-ppc64le:0.3.2 +amd64=k8s.gcr.io/debian-base-amd64:0.4.0 +arm=k8s.gcr.io/debian-base-arm:0.4.0 +arm64=k8s.gcr.io/debian-base-arm64:0.4.0 +ppc64le=k8s.gcr.io/debian-base-ppc64le:0.4.0 diff --git a/test/images/pets/peer-finder/VERSION b/test/images/pets/peer-finder/VERSION index 5625e59da88..7e32cd56983 100644 --- a/test/images/pets/peer-finder/VERSION +++ b/test/images/pets/peer-finder/VERSION @@ -1 +1 @@ -1.2 +1.3 diff --git a/test/images/pets/redis-installer/BASEIMAGE b/test/images/pets/redis-installer/BASEIMAGE index 67f1b591b38..975e946af60 100644 --- a/test/images/pets/redis-installer/BASEIMAGE +++ b/test/images/pets/redis-installer/BASEIMAGE @@ -1,4 +1,4 @@ -amd64=k8s.gcr.io/debian-base-amd64:0.3.2 -arm=k8s.gcr.io/debian-base-arm:0.3.2 -arm64=k8s.gcr.io/debian-base-arm64:0.3.2 -ppc64le=k8s.gcr.io/debian-base-ppc64le:0.3.2 +amd64=k8s.gcr.io/debian-base-amd64:0.4.0 +arm=k8s.gcr.io/debian-base-arm:0.4.0 +arm64=k8s.gcr.io/debian-base-arm64:0.4.0 +ppc64le=k8s.gcr.io/debian-base-ppc64le:0.4.0 diff --git a/test/images/pets/redis-installer/VERSION b/test/images/pets/redis-installer/VERSION index 9459d4ba2a0..5625e59da88 100644 --- a/test/images/pets/redis-installer/VERSION +++ b/test/images/pets/redis-installer/VERSION @@ -1 +1 @@ -1.1 +1.2 diff --git a/test/images/pets/zookeeper-installer/BASEIMAGE b/test/images/pets/zookeeper-installer/BASEIMAGE index 67f1b591b38..975e946af60 100644 --- a/test/images/pets/zookeeper-installer/BASEIMAGE +++ b/test/images/pets/zookeeper-installer/BASEIMAGE @@ -1,4 +1,4 @@ -amd64=k8s.gcr.io/debian-base-amd64:0.3.2 -arm=k8s.gcr.io/debian-base-arm:0.3.2 -arm64=k8s.gcr.io/debian-base-arm64:0.3.2 -ppc64le=k8s.gcr.io/debian-base-ppc64le:0.3.2 +amd64=k8s.gcr.io/debian-base-amd64:0.4.0 +arm=k8s.gcr.io/debian-base-arm:0.4.0 +arm64=k8s.gcr.io/debian-base-arm64:0.4.0 +ppc64le=k8s.gcr.io/debian-base-ppc64le:0.4.0 diff --git a/test/images/pets/zookeeper-installer/VERSION b/test/images/pets/zookeeper-installer/VERSION index 9459d4ba2a0..5625e59da88 100644 --- a/test/images/pets/zookeeper-installer/VERSION +++ b/test/images/pets/zookeeper-installer/VERSION @@ -1 +1 @@ -1.1 +1.2 diff --git a/test/images/resource-consumer/BASEIMAGE 
b/test/images/resource-consumer/BASEIMAGE index 67f1b591b38..975e946af60 100644 --- a/test/images/resource-consumer/BASEIMAGE +++ b/test/images/resource-consumer/BASEIMAGE @@ -1,4 +1,4 @@ -amd64=k8s.gcr.io/debian-base-amd64:0.3.2 -arm=k8s.gcr.io/debian-base-arm:0.3.2 -arm64=k8s.gcr.io/debian-base-arm64:0.3.2 -ppc64le=k8s.gcr.io/debian-base-ppc64le:0.3.2 +amd64=k8s.gcr.io/debian-base-amd64:0.4.0 +arm=k8s.gcr.io/debian-base-arm:0.4.0 +arm64=k8s.gcr.io/debian-base-arm64:0.4.0 +ppc64le=k8s.gcr.io/debian-base-ppc64le:0.4.0 diff --git a/test/images/resource-consumer/VERSION b/test/images/resource-consumer/VERSION index 7e32cd56983..c068b2447cc 100644 --- a/test/images/resource-consumer/VERSION +++ b/test/images/resource-consumer/VERSION @@ -1 +1 @@ -1.3 +1.4 From f87d2c61f37f679b9696b3ac99e1e396a079fd07 Mon Sep 17 00:00:00 2001 From: Joe Betz Date: Mon, 15 Oct 2018 13:23:49 -0700 Subject: [PATCH 13/19] Add api-machinery 'watch-consistency' e2e test --- test/e2e/apimachinery/watch.go | 112 +++++++++++++++++++++++++++++ 1 file changed, 112 insertions(+) diff --git a/test/e2e/apimachinery/watch.go b/test/e2e/apimachinery/watch.go index 2ca292bda2c..4709f9c0418 100644 --- a/test/e2e/apimachinery/watch.go +++ b/test/e2e/apimachinery/watch.go @@ -17,6 +17,8 @@ limitations under the License. package apimachinery import ( + "fmt" + "math/rand" "time" "k8s.io/api/core/v1" @@ -314,6 +316,49 @@ var _ = SIGDescribe("Watchers", func() { expectEvent(testWatch, watch.Modified, testConfigMapThirdUpdate) expectEvent(testWatch, watch.Deleted, nil) }) + + /* + Testname: watch-consistency + Description: Ensure that concurrent watches are consistent with each other by initiating an additional watch + for each event received from the first watch, started at that event's resource version, and checking that all + watches then receive the same resource versions in the same order. Events are produced from writes on a background goroutine.
+ */ + It("should receive events on concurrent watches in same order", func() { + c := f.ClientSet + ns := f.Namespace.Name + + iterations := 100 + + By("starting a background goroutine to produce watch events") + donec := make(chan struct{}) + stopc := make(chan struct{}) + go func() { + defer GinkgoRecover() + defer close(donec) + produceConfigMapEvents(f, stopc, 5*time.Millisecond) + }() + + By("creating watches starting from each resource version of the events produced and verifying they all receive resource versions in the same order") + wcs := []watch.Interface{} + resourceVersion := "0" + for i := 0; i < iterations; i++ { + wc, err := c.CoreV1().ConfigMaps(ns).Watch(metav1.ListOptions{ResourceVersion: resourceVersion}) + Expect(err).NotTo(HaveOccurred()) + wcs = append(wcs, wc) + resourceVersion = waitForNextConfigMapEvent(wcs[0]).ResourceVersion + for _, wc := range wcs[1:] { + e := waitForNextConfigMapEvent(wc) + if resourceVersion != e.ResourceVersion { + framework.Failf("resource version mismatch, expected %s but got %s", resourceVersion, e.ResourceVersion) + } + } + } + close(stopc) + for _, wc := range wcs { + wc.Stop() + } + <-donec + }) }) func watchConfigMaps(f *framework.Framework, resourceVersion string, labels ...string) (watch.Interface, error) { @@ -381,3 +426,70 @@ func waitForEvent(w watch.Interface, expectType watch.EventType, expectObject ru } } } + +func waitForNextConfigMapEvent(watch watch.Interface) *v1.ConfigMap { + select { + case event := <-watch.ResultChan(): + if configMap, ok := event.Object.(*v1.ConfigMap); ok { + return configMap + } else { + framework.Failf("expected config map") + } + case <-time.After(10 * time.Second): + framework.Failf("timed out waiting for watch event") + } + return nil // should never happen +} + +const ( + createEvent = iota + updateEvent + deleteEvent +) + +func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWaitBetweenEvents time.Duration) { + c := f.ClientSet + ns := f.Namespace.Name + + name := func(i int) string { + return fmt.Sprintf("cm-%d", i) + } + + existing := []int{} + tc := time.NewTicker(minWaitBetweenEvents) + defer tc.Stop() + i := 0 + for range tc.C { + op := rand.Intn(3) + if len(existing) == 0 { + op = createEvent + } + + cm := &v1.ConfigMap{} + switch op { + case createEvent: + cm.Name = name(i) + _, err := c.CoreV1().ConfigMaps(ns).Create(cm) + Expect(err).NotTo(HaveOccurred()) + existing = append(existing, i) + i += 1 + case updateEvent: + idx := rand.Intn(len(existing)) + cm.Name = name(existing[idx]) + _, err := c.CoreV1().ConfigMaps(ns).Update(cm) + Expect(err).NotTo(HaveOccurred()) + case deleteEvent: + idx := rand.Intn(len(existing)) + err := c.CoreV1().ConfigMaps(ns).Delete(name(existing[idx]), &metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + existing = append(existing[:idx], existing[idx+1:]...) 
+ default: + framework.Failf("Unsupported event operation: %d", op) + } + select { + case <-stopc: + return + default: + } + } +} From 2908174517cc1ded8a3dc283d2e8e34d7ad9be7a Mon Sep 17 00:00:00 2001 From: andrewsykim Date: Fri, 19 Oct 2018 17:00:43 -0400 Subject: [PATCH 14/19] pass in stopCh to cloud provider Initialize method for custom controllers --- cmd/cloud-controller-manager/app/controllermanager.go | 2 +- cmd/kube-controller-manager/app/controllermanager.go | 2 +- pkg/cloudprovider/providers/aws/aws.go | 2 +- pkg/cloudprovider/providers/azure/azure.go | 2 +- pkg/cloudprovider/providers/cloudstack/cloudstack.go | 3 ++- pkg/cloudprovider/providers/fake/fake.go | 3 ++- pkg/cloudprovider/providers/gce/gce.go | 2 +- pkg/cloudprovider/providers/openstack/openstack.go | 3 ++- pkg/cloudprovider/providers/ovirt/ovirt.go | 3 ++- pkg/cloudprovider/providers/photon/photon.go | 3 ++- pkg/cloudprovider/providers/vsphere/vsphere.go | 2 +- staging/src/k8s.io/cloud-provider/cloud.go | 5 +++-- 12 files changed, 19 insertions(+), 13 deletions(-) diff --git a/cmd/cloud-controller-manager/app/controllermanager.go b/cmd/cloud-controller-manager/app/controllermanager.go index d01600646f2..4fa93a65b29 100644 --- a/cmd/cloud-controller-manager/app/controllermanager.go +++ b/cmd/cloud-controller-manager/app/controllermanager.go @@ -202,7 +202,7 @@ func startControllers(c *cloudcontrollerconfig.CompletedConfig, stop <-chan stru } if cloud != nil { // Initialize the cloud provider with a reference to the clientBuilder - cloud.Initialize(c.ClientBuilder) + cloud.Initialize(c.ClientBuilder, stop) } // Start the CloudNodeController nodeController := cloudcontrollers.NewCloudNodeController( diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 1dab0461221..b417071c5fd 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -468,7 +468,7 @@ func StartControllers(ctx ControllerContext, startSATokenController InitFunc, co // Initialize the cloud provider with a reference to the clientBuilder only after token controller // has started in case the cloud provider uses the client builder. 
if ctx.Cloud != nil { - ctx.Cloud.Initialize(ctx.ClientBuilder) + ctx.Cloud.Initialize(ctx.ClientBuilder, ctx.Stop) } for controllerName, initFn := range controllers { diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go index 02f7c783b22..dbad7cdd750 100644 --- a/pkg/cloudprovider/providers/aws/aws.go +++ b/pkg/cloudprovider/providers/aws/aws.go @@ -1159,7 +1159,7 @@ func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (c *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder) { +func (c *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { c.clientBuilder = clientBuilder c.kubeClient = clientBuilder.ClientOrDie("aws-cloud-provider") c.eventBroadcaster = record.NewBroadcaster() diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index 59115591c2d..c80859926e0 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -390,7 +390,7 @@ func parseConfig(configReader io.Reader) (*Config, error) { } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (az *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder) { +func (az *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { az.kubeClient = clientBuilder.ClientOrDie("azure-cloud-provider") az.eventBroadcaster = record.NewBroadcaster() az.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: az.kubeClient.CoreV1().Events("")}) diff --git a/pkg/cloudprovider/providers/cloudstack/cloudstack.go b/pkg/cloudprovider/providers/cloudstack/cloudstack.go index 24846525ed0..f0a08b77bd2 100644 --- a/pkg/cloudprovider/providers/cloudstack/cloudstack.go +++ b/pkg/cloudprovider/providers/cloudstack/cloudstack.go @@ -121,7 +121,8 @@ func newCSCloud(cfg *CSConfig) (*CSCloud, error) { } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (cs *CSCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder) {} +func (cs *CSCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { +} // LoadBalancer returns an implementation of LoadBalancer for CloudStack. 
func (cs *CSCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { diff --git a/pkg/cloudprovider/providers/fake/fake.go b/pkg/cloudprovider/providers/fake/fake.go index 4d125fa0f4c..8adf2fed214 100644 --- a/pkg/cloudprovider/providers/fake/fake.go +++ b/pkg/cloudprovider/providers/fake/fake.go @@ -97,7 +97,8 @@ func (f *FakeCloud) ClearCalls() { } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (f *FakeCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder) {} +func (f *FakeCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { +} func (f *FakeCloud) ListClusters(ctx context.Context) ([]string, error) { return f.ClusterList, f.Err diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index aac5468a962..857c266fcf3 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -610,7 +610,7 @@ func tryConvertToProjectNames(configProject, configNetworkProject string, servic // Initialize takes in a clientBuilder and spawns a goroutine for watching the clusterid configmap. // This must be called before utilizing the funcs of gce.ClusterID -func (gce *GCECloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder) { +func (gce *GCECloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { gce.clientBuilder = clientBuilder gce.client = clientBuilder.ClientOrDie("cloud-provider") diff --git a/pkg/cloudprovider/providers/openstack/openstack.go b/pkg/cloudprovider/providers/openstack/openstack.go index f1cf73146e5..fbcee0f42a9 100644 --- a/pkg/cloudprovider/providers/openstack/openstack.go +++ b/pkg/cloudprovider/providers/openstack/openstack.go @@ -367,7 +367,8 @@ func newOpenStack(cfg Config) (*OpenStack, error) { } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (os *OpenStack) Initialize(clientBuilder cloudprovider.ControllerClientBuilder) {} +func (os *OpenStack) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { +} // mapNodeNameToServerName maps a k8s NodeName to an OpenStack Server Name // This is a simple string cast. 
diff --git a/pkg/cloudprovider/providers/ovirt/ovirt.go b/pkg/cloudprovider/providers/ovirt/ovirt.go index ae3fb862dd4..9937919ed16 100644 --- a/pkg/cloudprovider/providers/ovirt/ovirt.go +++ b/pkg/cloudprovider/providers/ovirt/ovirt.go @@ -117,7 +117,8 @@ func newOVirtCloud(config io.Reader) (*OVirtCloud, error) { } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (v *OVirtCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder) {} +func (v *OVirtCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { +} func (v *OVirtCloud) Clusters() (cloudprovider.Clusters, bool) { return nil, false diff --git a/pkg/cloudprovider/providers/photon/photon.go b/pkg/cloudprovider/providers/photon/photon.go index 583a00b3091..bcf5e313449 100644 --- a/pkg/cloudprovider/providers/photon/photon.go +++ b/pkg/cloudprovider/providers/photon/photon.go @@ -286,7 +286,8 @@ func newPCCloud(cfg PCConfig) (*PCCloud, error) { } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (pc *PCCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder) {} +func (pc *PCCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { +} // Instances returns an implementation of Instances for Photon Controller. func (pc *PCCloud) Instances() (cloudprovider.Instances, bool) { diff --git a/pkg/cloudprovider/providers/vsphere/vsphere.go b/pkg/cloudprovider/providers/vsphere/vsphere.go index 02589b7a03c..7e22407ed5b 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -241,7 +241,7 @@ func init() { } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (vs *VSphere) Initialize(clientBuilder cloudprovider.ControllerClientBuilder) { +func (vs *VSphere) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { } // Initialize Node Informers diff --git a/staging/src/k8s.io/cloud-provider/cloud.go b/staging/src/k8s.io/cloud-provider/cloud.go index 50a30f31060..6db0219520b 100644 --- a/staging/src/k8s.io/cloud-provider/cloud.go +++ b/staging/src/k8s.io/cloud-provider/cloud.go @@ -42,8 +42,9 @@ type ControllerClientBuilder interface { // Interface is an abstract, pluggable interface for cloud providers. type Interface interface { // Initialize provides the cloud with a kubernetes client builder and may spawn goroutines - // to perform housekeeping activities within the cloud provider. - Initialize(clientBuilder ControllerClientBuilder) + // to perform housekeeping or run custom controllers specific to the cloud provider. + // Any tasks started here should be cleaned up when the stop channel closes. + Initialize(clientBuilder ControllerClientBuilder, stop <-chan struct{}) // LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise. LoadBalancer() (LoadBalancer, bool) // Instances returns an instances interface. Also returns true if the interface is supported, false otherwise. 
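To make the updated contract concrete, here is a minimal sketch of an out-of-tree provider honoring the new stop channel. The exampleCloud type and its ticker loop are hypothetical; only the Initialize signature comes from the k8s.io/cloud-provider interface shown above, and the remaining Interface methods are omitted for brevity.

    package example

    import (
    	"time"

    	cloudprovider "k8s.io/cloud-provider"
    )

    // exampleCloud is a hypothetical provider used only to illustrate the
    // updated Initialize contract.
    type exampleCloud struct{}

    // Initialize spawns a housekeeping goroutine and ties its lifetime to the
    // stop channel, as the revised interface documentation requires.
    func (e *exampleCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {
    	go func() {
    		ticker := time.NewTicker(time.Minute)
    		defer ticker.Stop()
    		for {
    			select {
    			case <-ticker.C:
    				// periodic provider-specific housekeeping would run here
    			case <-stop:
    				// exit cleanly when the controller manager shuts down
    				return
    			}
    		}
    	}()
    }

The next patch wires this pattern up for GCE: the stop channel passed to Initialize is forwarded to the cluster ID informer's Run, so the watcher terminates with the controller manager instead of running forever.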
From 3521ebd1e7d75195fcb3468a51793baa5b00f58d Mon Sep 17 00:00:00 2001 From: andrewsykim Date: Fri, 19 Oct 2018 17:01:13 -0400 Subject: [PATCH 15/19] pass in stopCh for GCE cluster ID controller --- pkg/cloudprovider/providers/gce/gce.go | 2 +- pkg/cloudprovider/providers/gce/gce_clusterid.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index 857c266fcf3..794bf85efd3 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -620,7 +620,7 @@ func (gce *GCECloud) Initialize(clientBuilder cloudprovider.ControllerClientBuil gce.eventRecorder = gce.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "gce-cloudprovider"}) } - go gce.watchClusterID() + go gce.watchClusterID(stop) } // LoadBalancer returns an implementation of LoadBalancer for Google Compute Engine. diff --git a/pkg/cloudprovider/providers/gce/gce_clusterid.go b/pkg/cloudprovider/providers/gce/gce_clusterid.go index 46b4ff4f6a0..80e54bd1723 100644 --- a/pkg/cloudprovider/providers/gce/gce_clusterid.go +++ b/pkg/cloudprovider/providers/gce/gce_clusterid.go @@ -59,7 +59,7 @@ type ClusterID struct { } // Continually watches for changes to the cluster id config map -func (gce *GCECloud) watchClusterID() { +func (gce *GCECloud) watchClusterID(stop <-chan struct{}) { gce.ClusterID = ClusterID{ cfgMapKey: fmt.Sprintf("%v/%v", UIDNamespace, UIDConfigMapName), client: gce.client, @@ -105,7 +105,7 @@ func (gce *GCECloud) watchClusterID() { var controller cache.Controller gce.ClusterID.store, controller = cache.NewInformer(newSingleObjectListerWatcher(listerWatcher, UIDConfigMapName), &v1.ConfigMap{}, updateFuncFrequency, mapEventHandler) - controller.Run(nil) + controller.Run(stop) } // GetID returns the id which is unique to this cluster From 3787a4b5be4fd33295f8d33bd07c00a21ee17412 Mon Sep 17 00:00:00 2001 From: Benjamin Elder Date: Fri, 19 Oct 2018 17:05:06 -0700 Subject: [PATCH 16/19] register skeleton provider --- test/e2e/framework/provider.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/e2e/framework/provider.go b/test/e2e/framework/provider.go index feef1ca3377..f6c8181e0cf 100644 --- a/test/e2e/framework/provider.go +++ b/test/e2e/framework/provider.go @@ -43,10 +43,13 @@ func RegisterProvider(name string, factory Factory) { } func init() { - // "local" can always be used. + // "local" or "skeleton" can always be used. RegisterProvider("local", func() (ProviderInterface, error) { return NullProvider{}, nil }) + RegisterProvider("skeleton", func() (ProviderInterface, error) { + return NullProvider{}, nil + }) // The empty string also works, but triggers a warning. RegisterProvider("", func() (ProviderInterface, error) { Logf("The --provider flag is not set. Treating as a conformance test. 
Some tests may not be run.") From 33a866c4707f34419e088d9a89f776ee67dca92d Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Fri, 19 Oct 2018 17:16:10 -0400 Subject: [PATCH 17/19] Remove temporary workaround for scheduler alpha config-loading --- cmd/kube-scheduler/app/options/BUILD | 3 -- cmd/kube-scheduler/app/options/configfile.go | 44 ------------------- .../app/options/options_test.go | 31 +------------ 3 files changed, 1 insertion(+), 77 deletions(-) diff --git a/cmd/kube-scheduler/app/options/BUILD b/cmd/kube-scheduler/app/options/BUILD index 3531b68930c..ebe68f08340 100644 --- a/cmd/kube-scheduler/app/options/BUILD +++ b/cmd/kube-scheduler/app/options/BUILD @@ -20,12 +20,9 @@ go_library( "//pkg/scheduler/apis/config/validation:go_default_library", "//pkg/scheduler/factory:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/config:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", diff --git a/cmd/kube-scheduler/app/options/configfile.go b/cmd/kube-scheduler/app/options/configfile.go index a564c71409e..25303ee347f 100644 --- a/cmd/kube-scheduler/app/options/configfile.go +++ b/cmd/kube-scheduler/app/options/configfile.go @@ -17,16 +17,11 @@ limitations under the License. 
package options import ( - "bytes" "errors" - "fmt" "io/ioutil" "os" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/json" kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" kubeschedulerscheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme" @@ -45,45 +40,6 @@ func loadConfigFromFile(file string) (*kubeschedulerconfig.KubeSchedulerConfigur func loadConfig(data []byte) (*kubeschedulerconfig.KubeSchedulerConfiguration, error) { configObj := &kubeschedulerconfig.KubeSchedulerConfiguration{} if err := runtime.DecodeInto(kubeschedulerscheme.Codecs.UniversalDecoder(), data, configObj); err != nil { - - // if this is a componentconfig/v1alpha1 KubeSchedulerConfiguration object, coerce it to kubescheduler.config.k8s.io/v1alpha1 with a warning - // TODO: drop this block in 1.13 - if runtime.IsNotRegisteredError(err) { - originalErr := err - var ( - u = &unstructured.Unstructured{} - codec = json.NewYAMLSerializer(json.DefaultMetaFactory, kubeschedulerscheme.Scheme, kubeschedulerscheme.Scheme) - legacyConfigGVK = schema.GroupVersionKind{Group: "componentconfig", Version: "v1alpha1", Kind: "KubeSchedulerConfiguration"} - ) - // attempt to decode to an unstructured object - obj, gvk, err := codec.Decode(data, nil, u) - - // if this errored, or the object we read was not the legacy alpha gvk, return the original error - if err != nil || gvk == nil || *gvk != legacyConfigGVK { - return nil, originalErr - } - - fmt.Printf("WARNING: the provided config file is an unsupported apiVersion (%q), which will be removed in future releases\n\n", legacyConfigGVK.GroupVersion().String()) - fmt.Printf("WARNING: switch to command-line flags or update your config file apiVersion to %q\n\n", kubeschedulerconfigv1alpha1.SchemeGroupVersion.String()) - fmt.Printf("WARNING: apiVersions at alpha-level are not guaranteed to be supported in future releases\n\n") - - // attempt to coerce to the new alpha gvk - if err := meta.NewAccessor().SetAPIVersion(obj, kubeschedulerconfigv1alpha1.SchemeGroupVersion.String()); err != nil { - // return the original error on failure - return nil, originalErr - } - - // attempt to encode the coerced apiVersion back to bytes - buffer := bytes.NewBuffer([]byte{}) - if err := codec.Encode(obj, buffer); err != nil { - // return the original error on failure - return nil, originalErr - } - - // re-attempt to load the coerced apiVersion - return loadConfig(buffer.Bytes()) - } - return nil, err } diff --git a/cmd/kube-scheduler/app/options/options_test.go b/cmd/kube-scheduler/app/options/options_test.go index e8e683c82ea..b63fac090c4 100644 --- a/cmd/kube-scheduler/app/options/options_test.go +++ b/cmd/kube-scheduler/app/options/options_test.go @@ -217,36 +217,7 @@ users: return *cfg }(), }, - // TODO: switch this to expect an error in 1.13 when the special-case coercion is removed from loadConfig - // expectedError: "no kind \"KubeSchedulerConfiguration\" is registered for version \"componentconfig/v1alpha1\"", - expectedUsername: "config", - expectedConfig: kubeschedulerconfig.KubeSchedulerConfiguration{ - SchedulerName: "default-scheduler", - AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{Provider: &defaultSource}, - HardPodAffinitySymmetricWeight: 1, - HealthzBindAddress: "0.0.0.0:10251", - MetricsBindAddress: "0.0.0.0:10251", - FailureDomains: 
"kubernetes.io/hostname,failure-domain.beta.kubernetes.io/zone,failure-domain.beta.kubernetes.io/region", - LeaderElection: kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration{ - LeaderElectionConfiguration: apiserverconfig.LeaderElectionConfiguration{ - LeaderElect: true, - LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, - RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, - RetryPeriod: metav1.Duration{Duration: 2 * time.Second}, - ResourceLock: "endpoints", - }, - LockObjectNamespace: "kube-system", - LockObjectName: "kube-scheduler", - }, - ClientConnection: apimachineryconfig.ClientConnectionConfiguration{ - Kubeconfig: configKubeconfig, - QPS: 50, - Burst: 100, - ContentType: "application/vnd.kubernetes.protobuf", - }, - PercentageOfNodesToScore: 50, - BindTimeoutSeconds: &defaultBindTimeoutSeconds, - }, + expectedError: "no kind \"KubeSchedulerConfiguration\" is registered for version \"componentconfig/v1alpha1\"", }, { From 1ada4b23b7421e64ff2604120045137a67dec835 Mon Sep 17 00:00:00 2001 From: Ed Bartosh Date: Sun, 21 Oct 2018 00:42:08 +0300 Subject: [PATCH 18/19] kubeadm: fix typo: missing round bracket --- cmd/kubeadm/app/phases/upgrade/staticpods.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods.go b/cmd/kubeadm/app/phases/upgrade/staticpods.go index cae7d7a1c7e..4129b6698ca 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods.go @@ -226,7 +226,7 @@ func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticP if waitForComponentRestart { fmt.Println("[upgrade/staticpods] Waiting for the kubelet to restart the component") - fmt.Printf("[upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout %v\n", UpgradeManifestTimeout) + fmt.Printf("[upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout %v)\n", UpgradeManifestTimeout) // Wait for the mirror Pod hash to change; otherwise we'll run into race conditions here when the kubelet hasn't had time to // notice the removal of the Static Pod, leading to a false positive below where we check that the API endpoint is healthy From 149fbe387420a002e7cb425d4945e18633302adf Mon Sep 17 00:00:00 2001 From: Ed Bartosh Date: Sun, 21 Oct 2018 10:07:02 +0300 Subject: [PATCH 19/19] kubeadm: don't prepull etcd image on upgrade Skipped prepulling etcd image if external etcd is used. 
Fixes: kubernetes/kubeadm#1136 --- cmd/kubeadm/app/cmd/upgrade/apply.go | 6 +++++- cmd/kubeadm/app/phases/upgrade/prepull.go | 3 +-- cmd/kubeadm/app/phases/upgrade/prepull_test.go | 4 +++- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/cmd/kubeadm/app/cmd/upgrade/apply.go b/cmd/kubeadm/app/cmd/upgrade/apply.go index 2430701d072..acffb90f441 100644 --- a/cmd/kubeadm/app/cmd/upgrade/apply.go +++ b/cmd/kubeadm/app/cmd/upgrade/apply.go @@ -194,7 +194,11 @@ func RunApply(flags *applyFlags) error { // and block until all DaemonSets are ready; then we know for sure that all control plane images are cached locally glog.V(1).Infof("[upgrade/apply] creating prepuller") prepuller := upgrade.NewDaemonSetPrepuller(upgradeVars.client, upgradeVars.waiter, &upgradeVars.cfg.ClusterConfiguration) - if err := upgrade.PrepullImagesInParallel(prepuller, flags.imagePullTimeout); err != nil { + componentsToPrepull := constants.MasterComponents + if upgradeVars.cfg.Etcd.External == nil { + componentsToPrepull = append(componentsToPrepull, constants.Etcd) + } + if err := upgrade.PrepullImagesInParallel(prepuller, flags.imagePullTimeout, componentsToPrepull); err != nil { return fmt.Errorf("[upgrade/prepull] Failed prepulled the images for the control plane components error: %v", err) } diff --git a/cmd/kubeadm/app/phases/upgrade/prepull.go b/cmd/kubeadm/app/phases/upgrade/prepull.go index 19eb28f61c9..63a2be77d8d 100644 --- a/cmd/kubeadm/app/phases/upgrade/prepull.go +++ b/cmd/kubeadm/app/phases/upgrade/prepull.go @@ -91,8 +91,7 @@ func (d *DaemonSetPrepuller) DeleteFunc(component string) error { } // PrepullImagesInParallel creates DaemonSets synchronously but waits in parallel for the images to pull -func PrepullImagesInParallel(kubePrepuller Prepuller, timeout time.Duration) error { - componentsToPrepull := append(constants.MasterComponents, constants.Etcd) +func PrepullImagesInParallel(kubePrepuller Prepuller, timeout time.Duration, componentsToPrepull []string) error { fmt.Printf("[upgrade/prepull] Will prepull images for components %v\n", componentsToPrepull) timeoutChan := time.After(timeout) diff --git a/cmd/kubeadm/app/phases/upgrade/prepull_test.go b/cmd/kubeadm/app/phases/upgrade/prepull_test.go index 9fd38b77c10..559f71fa586 100644 --- a/cmd/kubeadm/app/phases/upgrade/prepull_test.go +++ b/cmd/kubeadm/app/phases/upgrade/prepull_test.go @@ -20,6 +20,8 @@ import ( "fmt" "testing" "time" + + "k8s.io/kubernetes/cmd/kubeadm/app/constants" //"k8s.io/apimachinery/pkg/util/version" ) @@ -133,7 +135,7 @@ func TestPrepullImagesInParallel(t *testing.T) { for _, rt := range tests { - actualErr := PrepullImagesInParallel(rt.p, rt.timeout) + actualErr := PrepullImagesInParallel(rt.p, rt.timeout, append(constants.MasterComponents, constants.Etcd)) if (actualErr != nil) != rt.expectedErr { t.Errorf( "failed TestPrepullImagesInParallel\n\texpected error: %t\n\tgot: %t",
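As a closing illustration of the new prepull contract, here is a small self-contained sketch of the caller-side component selection. The types below are stripped-down stand-ins for the kubeadm API types (assumptions made for this sketch, not the real definitions), and the component names mirror constants.MasterComponents and constants.Etcd:

    package main

    import "fmt"

    // ExternalEtcd and Etcd are simplified stand-ins for the kubeadm
    // v1beta1 API types; they exist only for this sketch.
    type ExternalEtcd struct{ Endpoints []string }
    type Etcd struct{ External *ExternalEtcd }

    var masterComponents = []string{"kube-apiserver", "kube-controller-manager", "kube-scheduler"}

    // componentsToPrepull mirrors the logic added to upgrade/apply: etcd is
    // prepulled only when kubeadm manages it (stacked etcd); with external
    // etcd there is no etcd static pod for kubeadm to upgrade.
    func componentsToPrepull(etcd Etcd) []string {
    	components := masterComponents
    	if etcd.External == nil {
    		components = append(components, "etcd")
    	}
    	return components
    }

    func main() {
    	// stacked etcd: [kube-apiserver kube-controller-manager kube-scheduler etcd]
    	fmt.Println(componentsToPrepull(Etcd{}))
    	// external etcd: [kube-apiserver kube-controller-manager kube-scheduler]
    	fmt.Println(componentsToPrepull(Etcd{External: &ExternalEtcd{Endpoints: []string{"https://10.0.0.1:2379"}}}))
    }

Moving the component list into the caller keeps PrepullImagesInParallel policy-free: the upgrade command decides what to pull, while the prepuller only fans out DaemonSets and waits with the shared timeout.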