extract common function in service, ingress and federation e2e to util, to be reused.
@@ -18,61 +18,28 @@ package e2e

import (
	"fmt"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	"os"
	"reflect"
	"strconv"
	"time"

	federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
	"k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_4"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
	"k8s.io/kubernetes/pkg/util/intstr"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

const (
	UserAgentName = "federation-e2e-service-controller"
	// TODO(madhusudancs): Using the same values as defined in the federated
	// service controller. Replace it with the values from the e2e framework.
	KubeAPIQPS   = 20.0
	KubeAPIBurst = 30

	FederatedServiceTimeout = 60 * time.Second

	FederatedServiceName    = "federated-service"
	FederatedServicePodName = "federated-service-test-pod"

	DefaultFederationName = "federation"

	// We use this to decide how long to wait for our DNS probes to succeed.
	DNSTTL = 180 * time.Second // TODO: make k8s.io/kubernetes/federation/pkg/federation-controller/service.minDnsTtl exported, and import it here.
)

var FederatedServiceLabels = map[string]string{
	"foo": "bar",
}

/*
cluster keeps track of the assorted objects and state related to each cluster
in the federation
*/
type cluster struct {
	name string
	*release_1_3.Clientset
	namespaceCreated bool    // Did we need to create a new namespace in this cluster?  If so, we should delete it.
	backendPod       *v1.Pod // The backend pod, if one's been created.
}

var _ = framework.KubeDescribe("[Feature:Federation]", func() {
	f := framework.NewDefaultFederatedFramework("federated-service")
	var clusters map[string]*cluster // All clusters, keyed by cluster name
@@ -88,98 +55,12 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
				federationName = DefaultFederationName
			}

			contexts := f.GetUnderlyingFederatedContexts()

			for _, context := range contexts {
				createClusterObjectOrFail(f, &context)
			}

			var clusterList *federationapi.ClusterList
			By("Obtaining a list of all the clusters")
			if err := wait.PollImmediate(framework.Poll, FederatedServiceTimeout, func() (bool, error) {
				var err error
				clusterList, err = f.FederationClientset_1_4.Federation().Clusters().List(api.ListOptions{})
				if err != nil {
					return false, err
				}
				framework.Logf("%d clusters registered, waiting for %d", len(clusterList.Items), len(contexts))
				if len(clusterList.Items) == len(contexts) {
					return true, nil
				}
				return false, nil
			}); err != nil {
				framework.Failf("Failed to list registered clusters: %+v", err)
			}

			framework.Logf("Checking that %d clusters are Ready", len(contexts))
			for _, context := range contexts {
				clusterIsReadyOrFail(f, &context)
			}
			framework.Logf("%d clusters are Ready", len(contexts))

			clusters = map[string]*cluster{}
			primaryClusterName = clusterList.Items[0].Name
			By(fmt.Sprintf("Labeling %q as the first cluster", primaryClusterName))
			for i, c := range clusterList.Items {
				framework.Logf("Creating a clientset for the cluster %s", c.Name)

				Expect(framework.TestContext.KubeConfig).ToNot(Equal(""), "KubeConfig must be specified to load clusters' client config")
				kubecfg, err := clientcmd.LoadFromFile(framework.TestContext.KubeConfig)
				framework.ExpectNoError(err, "error loading KubeConfig: %v", err)

				cfgOverride := &clientcmd.ConfigOverrides{
					ClusterInfo: clientcmdapi.Cluster{
						Server: c.Spec.ServerAddressByClientCIDRs[0].ServerAddress,
					},
				}
				ccfg := clientcmd.NewNonInteractiveClientConfig(*kubecfg, c.Name, cfgOverride, clientcmd.NewDefaultClientConfigLoadingRules())
				cfg, err := ccfg.ClientConfig()
				framework.ExpectNoError(err, "Error creating client config in cluster #%d (%q)", i, c.Name)

				cfg.QPS = KubeAPIQPS
				cfg.Burst = KubeAPIBurst
				clset := release_1_3.NewForConfigOrDie(restclient.AddUserAgent(cfg, UserAgentName))
				clusters[c.Name] = &cluster{c.Name, clset, false, nil}
			}

			for name, c := range clusters {
				// The e2e Framework created the required namespace in one of the clusters, but we need to create it in all the others, if it doesn't yet exist.
				if _, err := c.Clientset.Core().Namespaces().Get(f.Namespace.Name); errors.IsNotFound(err) {
					ns := &v1.Namespace{
						ObjectMeta: v1.ObjectMeta{
							Name: f.Namespace.Name,
						},
					}
					_, err := c.Clientset.Core().Namespaces().Create(ns)
					if err == nil {
						c.namespaceCreated = true
					}
					framework.ExpectNoError(err, "Couldn't create the namespace %s in cluster %q", f.Namespace.Name, name)
					framework.Logf("Namespace %s created in cluster %q", f.Namespace.Name, name)
				} else if err != nil {
					framework.Logf("Couldn't create the namespace %s in cluster %q: %v", f.Namespace.Name, name, err)
				}
			}
			primaryClusterName = registerClusters(clusters, UserAgentName, federationName, f)
		})

		AfterEach(func() {
			for name, c := range clusters {
				if c.namespaceCreated {
					if _, err := c.Clientset.Core().Namespaces().Get(f.Namespace.Name); !errors.IsNotFound(err) {
						err := c.Clientset.Core().Namespaces().Delete(f.Namespace.Name, &api.DeleteOptions{})
						framework.ExpectNoError(err, "Couldn't delete the namespace %s in cluster %q: %v", f.Namespace.Name, name, err)
					}
					framework.Logf("Namespace %s deleted in cluster %q", f.Namespace.Name, name)
				}
			}

			// Delete the registered clusters in the federation API server.
			clusterList, err := f.FederationClientset_1_4.Federation().Clusters().List(api.ListOptions{})
			framework.ExpectNoError(err, "Error listing clusters")
			for _, cluster := range clusterList.Items {
				err := f.FederationClientset_1_4.Federation().Clusters().Delete(cluster.Name, &api.DeleteOptions{})
				framework.ExpectNoError(err, "Error deleting cluster %q", cluster.Name)
			}
			unregisterClusters(clusters, f)
		})

		Describe("Service creation", func() {
@@ -195,7 +76,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {

			It("should succeed", func() {
				framework.SkipUnlessFederated(f.Client)
				service := createServiceOrFail(f.FederationClientset_1_4, f.Namespace.Name)
				service := createServiceOrFail(f.FederationClientset_1_4, f.Namespace.Name, FederatedServiceName)
				By(fmt.Sprintf("Creation of service %q in namespace %q succeeded.  Deleting service.", service.Name, f.Namespace.Name))
				// Cleanup
				err := f.FederationClientset_1_4.Services(f.Namespace.Name).Delete(service.Name, &api.DeleteOptions{})
@@ -205,7 +86,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {

			It("should create matching services in underlying clusters", func() {
				framework.SkipUnlessFederated(f.Client)
				service := createServiceOrFail(f.FederationClientset_1_4, f.Namespace.Name)
				service := createServiceOrFail(f.FederationClientset_1_4, f.Namespace.Name, FederatedServiceName)
				defer func() { // Cleanup
					By(fmt.Sprintf("Deleting service %q in namespace %q", service.Name, f.Namespace.Name))
					err := f.FederationClientset_1_4.Services(f.Namespace.Name).Delete(service.Name, &api.DeleteOptions{})
@@ -224,7 +105,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
			BeforeEach(func() {
				framework.SkipUnlessFederated(f.Client)
				createBackendPodsOrFail(clusters, f.Namespace.Name, FederatedServicePodName)
				service = createServiceOrFail(f.FederationClientset_1_4, f.Namespace.Name)
				service = createServiceOrFail(f.FederationClientset_1_4, f.Namespace.Name, FederatedServiceName)
				waitForServiceShardsOrFail(f.Namespace.Name, service, clusters)
			})

@@ -318,224 +199,3 @@ func equivalent(federationService, clusterService v1.Service) bool {
	}
	return reflect.DeepEqual(clusterService.Spec, federationService.Spec)
}

/*
   waitForServiceOrFail waits until a service is either present or absent in the cluster specified by clientset.
   If the condition is not met within timeout, it fails the calling test.
*/
func waitForServiceOrFail(clientset *release_1_3.Clientset, namespace string, service *v1.Service, present bool, timeout time.Duration) {
	By(fmt.Sprintf("Fetching a federated service shard of service %q in namespace %q from cluster", service.Name, namespace))
	var clusterService *v1.Service
	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
		clusterService, err := clientset.Services(namespace).Get(service.Name)
		if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone.
			By(fmt.Sprintf("Success: shard of federated service %q in namespace %q in cluster is absent", service.Name, namespace))
			return true, nil // Success
		}
		if present && err == nil { // We want it present, and the Get succeeded, so we're all good.
			By(fmt.Sprintf("Success: shard of federated service %q in namespace %q in cluster is present", service.Name, namespace))
			return true, nil // Success
		}
		By(fmt.Sprintf("Service %q in namespace %q in cluster.  Found: %v, waiting for Found: %v, trying again in %s (err=%v)", service.Name, namespace, clusterService != nil && err == nil, present, framework.Poll, err))
		return false, nil
	})
	framework.ExpectNoError(err, "Failed to verify service %q in namespace %q in cluster: Present=%v", service.Name, namespace, present)

	if present && clusterService != nil {
		Expect(equivalent(*clusterService, *service))
	}
}

/*
   waitForServiceShardsOrFail waits for the service to appear in all clusters
*/
func waitForServiceShardsOrFail(namespace string, service *v1.Service, clusters map[string]*cluster) {
	framework.Logf("Waiting for service %q in %d clusters", service.Name, len(clusters))
	for _, c := range clusters {
		waitForServiceOrFail(c.Clientset, namespace, service, true, FederatedServiceTimeout)
	}
}

func createService(clientset *federation_release_1_4.Clientset, namespace string) (*v1.Service, error) {
	if clientset == nil || len(namespace) == 0 {
		return nil, fmt.Errorf("Internal error: invalid parameters passed to deleteServiceOrFail: clientset: %v, namespace: %v", clientset, namespace)
	}
	By(fmt.Sprintf("Creating federated service %q in namespace %q", FederatedServiceName, namespace))

	service := &v1.Service{
		ObjectMeta: v1.ObjectMeta{
			Name: FederatedServiceName,
		},
		Spec: v1.ServiceSpec{
			Selector: FederatedServiceLabels,
			Type:     "LoadBalancer",
			Ports: []v1.ServicePort{
				{
					Name:       "http",
					Port:       80,
					TargetPort: intstr.FromInt(8080),
				},
			},
		},
	}
	By(fmt.Sprintf("Trying to create service %q in namespace %q", service.Name, namespace))
	return clientset.Services(namespace).Create(service)
}

func createServiceOrFail(clientset *federation_release_1_4.Clientset, namespace string) *v1.Service {
	service, err := createService(clientset, namespace)
	framework.ExpectNoError(err, "Creating service %q in namespace %q", service.Name, namespace)
	By(fmt.Sprintf("Successfully created federated service %q in namespace %q", FederatedServiceName, namespace))
	return service
}

func deleteServiceOrFail(clientset *federation_release_1_4.Clientset, namespace string, serviceName string) {
	if clientset == nil || len(namespace) == 0 || len(serviceName) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to deleteServiceOrFail: clientset: %v, namespace: %v, service: %v", clientset, namespace, serviceName))
	}
	err := clientset.Services(namespace).Delete(serviceName, api.NewDeleteOptions(0))
	framework.ExpectNoError(err, "Error deleting service %q from namespace %q", serviceName, namespace)
}

func podExitCodeDetector(f *framework.Framework, name string, code int32) func() error {
	// If we ever get any container logs, stash them here.
	logs := ""

	logerr := func(err error) error {
		if err == nil {
			return nil
		}
		if logs == "" {
			return err
		}
		return fmt.Errorf("%s (%v)", logs, err)
	}

	return func() error {
		pod, err := f.Client.Pods(f.Namespace.Name).Get(name)
		if err != nil {
			return logerr(err)
		}
		if len(pod.Status.ContainerStatuses) < 1 {
			return logerr(fmt.Errorf("no container statuses"))
		}

		// Best effort attempt to grab pod logs for debugging
		logs, err = framework.GetPodLogs(f.Client, f.Namespace.Name, name, pod.Spec.Containers[0].Name)
		if err != nil {
			framework.Logf("Cannot fetch pod logs: %v", err)
		}

		status := pod.Status.ContainerStatuses[0]
		if status.State.Terminated == nil {
			return logerr(fmt.Errorf("container is not in terminated state"))
		}
		if status.State.Terminated.ExitCode == code {
			return nil
		}

		return logerr(fmt.Errorf("exited %d", status.State.Terminated.ExitCode))
	}
}

func discoverService(f *framework.Framework, name string, exists bool, podName string) {
	command := []string{"sh", "-c", fmt.Sprintf("until nslookup '%s'; do sleep 10; done", name)}
	By(fmt.Sprintf("Looking up %q", name))

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:    "federated-service-discovery-container",
					Image:   "gcr.io/google_containers/busybox:1.24",
					Command: command,
				},
			},
			RestartPolicy: api.RestartPolicyOnFailure,
		},
	}

	By(fmt.Sprintf("Creating pod %q in namespace %q", pod.Name, f.Namespace.Name))
	_, err := f.Client.Pods(f.Namespace.Name).Create(pod)
	framework.ExpectNoError(err, "Trying to create pod to run %q", command)
	By(fmt.Sprintf("Successfully created pod %q in namespace %q", pod.Name, f.Namespace.Name))
	defer func() {
		By(fmt.Sprintf("Deleting pod %q from namespace %q", podName, f.Namespace.Name))
		err := f.Client.Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0))
		framework.ExpectNoError(err, "Deleting pod %q from namespace %q", podName, f.Namespace.Name)
		By(fmt.Sprintf("Deleted pod %q from namespace %q", podName, f.Namespace.Name))
	}()

	if exists {
		// TODO(mml): Eventually check the IP address is correct, too.
		Eventually(podExitCodeDetector(f, podName, 0), 3*DNSTTL, time.Second*2).
			Should(BeNil(), "%q should exit 0, but it never did", command)
	} else {
		Eventually(podExitCodeDetector(f, podName, 0), 3*DNSTTL, time.Second*2).
			ShouldNot(BeNil(), "%q should eventually not exit 0, but it always did", command)
	}
}

/*
createBackendPodsOrFail creates one pod in each cluster, and returns the created pods (in the same order as clusterClientSets).
If creation of any pod fails, the test fails (possibly with a partially created set of pods). No retries are attempted.
*/
func createBackendPodsOrFail(clusters map[string]*cluster, namespace string, name string) {
	pod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			Name: name,
			// Namespace: namespace,
			Labels: FederatedServiceLabels,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  name,
					Image: "gcr.io/google_containers/echoserver:1.4",
				},
			},
			RestartPolicy: v1.RestartPolicyAlways,
		},
	}
	for name, c := range clusters {
		By(fmt.Sprintf("Creating pod %q in namespace %q in cluster %q", pod.Name, namespace, name))
		createdPod, err := c.Clientset.Core().Pods(namespace).Create(pod)
		framework.ExpectNoError(err, "Creating pod %q in namespace %q in cluster %q", name, namespace, name)
		By(fmt.Sprintf("Successfully created pod %q in namespace %q in cluster %q: %v", pod.Name, namespace, name, *createdPod))
		c.backendPod = createdPod
	}
}

/*
deleteOneBackendPodOrFail deletes exactly one backend pod which must not be nil
The test fails if there are any errors.
*/
func deleteOneBackendPodOrFail(c *cluster) {
	pod := c.backendPod
	Expect(pod).ToNot(BeNil())
	err := c.Clientset.Core().Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0))
	if errors.IsNotFound(err) {
		By(fmt.Sprintf("Pod %q in namespace %q in cluster %q does not exist.  No need to delete it.", pod.Name, pod.Namespace, c.name))
	} else {
		framework.ExpectNoError(err, "Deleting pod %q in namespace %q from cluster %q", pod.Name, pod.Namespace, c.name)
	}
	By(fmt.Sprintf("Backend pod %q in namespace %q in cluster %q deleted or does not exist", pod.Name, pod.Namespace, c.name))
}

/*
deleteBackendPodsOrFail deletes one pod from each cluster that has one.
If deletion of any pod fails, the test fails (possibly with a partially deleted set of pods). No retries are attempted.
*/
func deleteBackendPodsOrFail(clusters map[string]*cluster, namespace string) {
	for name, c := range clusters {
		if c.backendPod != nil {
			deleteOneBackendPodOrFail(c)
			c.backendPod = nil
		} else {
			By(fmt.Sprintf("No backend pod to delete for cluster %q", name))
		}
	}
}

@@ -18,12 +18,46 @@ package e2e

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/intstr"

	federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
	"k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_4"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var (
	KubeAPIQPS            float32 = 20.0
	KubeAPIBurst                  = 30
	DefaultFederationName         = "federation"
	UserAgentName                 = "federation-e2e"
	// We use this to decide how long to wait for our DNS probes to succeed.
	DNSTTL = 180 * time.Second // TODO: make k8s.io/kubernetes/federation/pkg/federation-controller/service.minDnsTtl exported, and import it here.
)

/*
cluster keeps track of the assorted objects and state related to each cluster
in the federation
*/
type cluster struct {
	name string
	*release_1_3.Clientset
	namespaceCreated bool    // Did we need to create a new namespace in this cluster?  If so, we should delete it.
	backendPod       *v1.Pod // The backend pod, if one's been created.
}

func createClusterObjectOrFail(f *framework.Framework, context *framework.E2EContext) {
	framework.Logf("Creating cluster object: %s (%s, secret: %s)", context.Name, context.Cluster.Cluster.Server, context.Name)
	cluster := federationapi.Cluster{
@@ -60,3 +94,330 @@ func clusterIsReadyOrFail(f *framework.Framework, context *framework.E2EContext)
	framework.ExpectNoError(err, fmt.Sprintf("unexpected error in verifying if cluster %s is ready: %+v", context.Name, err))
	framework.Logf("Cluster %s is Ready", context.Name)
}

func waitForClustersReadiness(f *framework.Framework, clusterSize int) *federationapi.ClusterList {
	var clusterList *federationapi.ClusterList
	if err := wait.PollImmediate(framework.Poll, FederatedServiceTimeout, func() (bool, error) {
		var err error
		clusterList, err = f.FederationClientset_1_4.Federation().Clusters().List(api.ListOptions{})
		if err != nil {
			return false, err
		}
		framework.Logf("%d clusters registered, waiting for %d", len(clusterList.Items), clusterSize)
		if len(clusterList.Items) == clusterSize {
			return true, nil
		}
		return false, nil
	}); err != nil {
		framework.Failf("Failed to list registered clusters: %+v", err)
	}
	return clusterList
}

func createClientsetForCluster(c federationapi.Cluster, i int, userAgentName string) *release_1_3.Clientset {
	kubecfg, err := clientcmd.LoadFromFile(framework.TestContext.KubeConfig)
	framework.ExpectNoError(err, "error loading KubeConfig: %v", err)

	cfgOverride := &clientcmd.ConfigOverrides{
		ClusterInfo: clientcmdapi.Cluster{
			Server: c.Spec.ServerAddressByClientCIDRs[0].ServerAddress,
		},
	}
	ccfg := clientcmd.NewNonInteractiveClientConfig(*kubecfg, c.Name, cfgOverride, clientcmd.NewDefaultClientConfigLoadingRules())
	cfg, err := ccfg.ClientConfig()
	framework.ExpectNoError(err, "Error creating client config in cluster #%d (%q)", i, c.Name)

	cfg.QPS = KubeAPIQPS
	cfg.Burst = KubeAPIBurst
	return release_1_3.NewForConfigOrDie(restclient.AddUserAgent(cfg, userAgentName))
}

func createNamespaceInClusters(clusters map[string]*cluster, f *framework.Framework) {
	for name, c := range clusters {
		// The e2e Framework created the required namespace in one of the clusters, but we need to create it in all the others, if it doesn't yet exist.
		if _, err := c.Clientset.Core().Namespaces().Get(f.Namespace.Name); errors.IsNotFound(err) {
			ns := &v1.Namespace{
				ObjectMeta: v1.ObjectMeta{
					Name: f.Namespace.Name,
				},
			}
			_, err := c.Clientset.Core().Namespaces().Create(ns)
			if err == nil {
				c.namespaceCreated = true
			}
			framework.ExpectNoError(err, "Couldn't create the namespace %s in cluster %q", f.Namespace.Name, name)
			framework.Logf("Namespace %s created in cluster %q", f.Namespace.Name, name)
		} else if err != nil {
			framework.Logf("Couldn't create the namespace %s in cluster %q: %v", f.Namespace.Name, name, err)
		}
	}
}
func unregisterClusters(clusters map[string]*cluster, f *framework.Framework) {
	for name, c := range clusters {
		if c.namespaceCreated {
			if _, err := c.Clientset.Core().Namespaces().Get(f.Namespace.Name); !errors.IsNotFound(err) {
				err := c.Clientset.Core().Namespaces().Delete(f.Namespace.Name, &api.DeleteOptions{})
				framework.ExpectNoError(err, "Couldn't delete the namespace %s in cluster %q: %v", f.Namespace.Name, name, err)
			}
			framework.Logf("Namespace %s deleted in cluster %q", f.Namespace.Name, name)
		}
	}

	// Delete the registered clusters in the federation API server.
	clusterList, err := f.FederationClientset_1_4.Federation().Clusters().List(api.ListOptions{})
	framework.ExpectNoError(err, "Error listing clusters")
	for _, cluster := range clusterList.Items {
		err := f.FederationClientset_1_4.Federation().Clusters().Delete(cluster.Name, &api.DeleteOptions{})
		framework.ExpectNoError(err, "Error deleting cluster %q", cluster.Name)
	}
}

// cannot be moved to util, as By and Expect must be called from within a Ginkgo test unit
func registerClusters(clusters map[string]*cluster, userAgentName, federationName string, f *framework.Framework) string {

	contexts := f.GetUnderlyingFederatedContexts()

	for _, context := range contexts {
		createClusterObjectOrFail(f, &context)
	}

	By("Obtaining a list of all the clusters")
	clusterList := waitForClustersReadiness(f, len(contexts))

	framework.Logf("Checking that %d clusters are Ready", len(contexts))
	for _, context := range contexts {
		clusterIsReadyOrFail(f, &context)
	}
	framework.Logf("%d clusters are Ready", len(contexts))

	primaryClusterName := clusterList.Items[0].Name
	By(fmt.Sprintf("Labeling %q as the first cluster", primaryClusterName))
	for i, c := range clusterList.Items {
		framework.Logf("Creating a clientset for the cluster %s", c.Name)
		Expect(framework.TestContext.KubeConfig).ToNot(Equal(""), "KubeConfig must be specified to load clusters' client config")
		clusters[c.Name] = &cluster{c.Name, createClientsetForCluster(c, i, userAgentName), false, nil}
	}
	createNamespaceInClusters(clusters, f)
	return primaryClusterName
}
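
registerClusters and unregisterClusters are the two entry points the refactored suites call. Below is a minimal sketch of the wiring, mirroring the federated-service changes earlier in this diff; initializing the clusters map in the suite is an assumption, since that context line is not shown in this hunk:

	var _ = framework.KubeDescribe("[Feature:Federation]", func() {
		f := framework.NewDefaultFederatedFramework("federated-example")
		clusters := map[string]*cluster{} // All clusters, keyed by cluster name

		BeforeEach(func() {
			framework.SkipUnlessFederated(f.Client)
			// Registers every underlying cluster, builds a clientset for each,
			// and creates the test namespace where it does not yet exist.
			primaryClusterName := registerClusters(clusters, UserAgentName, DefaultFederationName, f)
			framework.Logf("Primary cluster is %q", primaryClusterName)
		})

		AfterEach(func() {
			// Deletes the namespaces created above and unregisters all
			// clusters from the federation API server.
			unregisterClusters(clusters, f)
		})
	})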

/*
   waitForServiceOrFail waits until a service is either present or absent in the cluster specified by clientset.
   If the condition is not met within timeout, it fails the calling test.
*/
func waitForServiceOrFail(clientset *release_1_3.Clientset, namespace string, service *v1.Service, present bool, timeout time.Duration) {
	By(fmt.Sprintf("Fetching a federated service shard of service %q in namespace %q from cluster", service.Name, namespace))
	var clusterService *v1.Service
	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
		clusterService, err := clientset.Services(namespace).Get(service.Name)
		if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone.
			By(fmt.Sprintf("Success: shard of federated service %q in namespace %q in cluster is absent", service.Name, namespace))
			return true, nil // Success
		}
		if present && err == nil { // We want it present, and the Get succeeded, so we're all good.
			By(fmt.Sprintf("Success: shard of federated service %q in namespace %q in cluster is present", service.Name, namespace))
			return true, nil // Success
		}
		By(fmt.Sprintf("Service %q in namespace %q in cluster.  Found: %v, waiting for Found: %v, trying again in %s (err=%v)", service.Name, namespace, clusterService != nil && err == nil, present, framework.Poll, err))
		return false, nil
	})
	framework.ExpectNoError(err, "Failed to verify service %q in namespace %q in cluster: Present=%v", service.Name, namespace, present)

	if present && clusterService != nil {
		Expect(equivalent(*clusterService, *service))
	}
}

/*
   waitForServiceShardsOrFail waits for the service to appear in all clusters
*/
func waitForServiceShardsOrFail(namespace string, service *v1.Service, clusters map[string]*cluster) {
	framework.Logf("Waiting for service %q in %d clusters", service.Name, len(clusters))
	for _, c := range clusters {
		waitForServiceOrFail(c.Clientset, namespace, service, true, FederatedServiceTimeout)
	}
}

func createService(clientset *federation_release_1_4.Clientset, namespace, name string) (*v1.Service, error) {
	if clientset == nil || len(namespace) == 0 {
		return nil, fmt.Errorf("Internal error: invalid parameters passed to createService: clientset: %v, namespace: %v", clientset, namespace)
	}
	By(fmt.Sprintf("Creating federated service %q in namespace %q", name, namespace))

	service := &v1.Service{
		ObjectMeta: v1.ObjectMeta{
			Name: name,
		},
		Spec: v1.ServiceSpec{
			Selector: FederatedServiceLabels,
			Type:     "LoadBalancer",
			Ports: []v1.ServicePort{
				{
					Name:       "http",
					Port:       80,
					TargetPort: intstr.FromInt(8080),
				},
			},
		},
	}
	By(fmt.Sprintf("Trying to create service %q in namespace %q", service.Name, namespace))
	return clientset.Services(namespace).Create(service)
}

func createServiceOrFail(clientset *federation_release_1_4.Clientset, namespace, name string) *v1.Service {
	service, err := createService(clientset, namespace, name)
	framework.ExpectNoError(err, "Creating service %q in namespace %q", service.Name, namespace)
	By(fmt.Sprintf("Successfully created federated service %q in namespace %q", name, namespace))
	return service
}

func deleteServiceOrFail(clientset *federation_release_1_4.Clientset, namespace string, serviceName string) {
	if clientset == nil || len(namespace) == 0 || len(serviceName) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to deleteServiceOrFail: clientset: %v, namespace: %v, service: %v", clientset, namespace, serviceName))
	}
	err := clientset.Services(namespace).Delete(serviceName, api.NewDeleteOptions(0))
	framework.ExpectNoError(err, "Error deleting service %q from namespace %q", serviceName, namespace)
}

func podExitCodeDetector(f *framework.Framework, name string, code int32) func() error {
	// If we ever get any container logs, stash them here.
	logs := ""

	logerr := func(err error) error {
		if err == nil {
			return nil
		}
		if logs == "" {
			return err
		}
		return fmt.Errorf("%s (%v)", logs, err)
	}

	return func() error {
		pod, err := f.Client.Pods(f.Namespace.Name).Get(name)
		if err != nil {
			return logerr(err)
		}
		if len(pod.Status.ContainerStatuses) < 1 {
			return logerr(fmt.Errorf("no container statuses"))
		}

		// Best effort attempt to grab pod logs for debugging
		logs, err = framework.GetPodLogs(f.Client, f.Namespace.Name, name, pod.Spec.Containers[0].Name)
		if err != nil {
			framework.Logf("Cannot fetch pod logs: %v", err)
		}

		status := pod.Status.ContainerStatuses[0]
		if status.State.Terminated == nil {
			return logerr(fmt.Errorf("container is not in terminated state"))
		}
		if status.State.Terminated.ExitCode == code {
			return nil
		}

		return logerr(fmt.Errorf("exited %d", status.State.Terminated.ExitCode))
	}
}

func discoverService(f *framework.Framework, name string, exists bool, podName string) {
	command := []string{"sh", "-c", fmt.Sprintf("until nslookup '%s'; do sleep 10; done", name)}
	By(fmt.Sprintf("Looking up %q", name))

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:    "federated-service-discovery-container",
					Image:   "gcr.io/google_containers/busybox:1.24",
					Command: command,
				},
			},
			RestartPolicy: api.RestartPolicyOnFailure,
		},
	}

	By(fmt.Sprintf("Creating pod %q in namespace %q", pod.Name, f.Namespace.Name))
	_, err := f.Client.Pods(f.Namespace.Name).Create(pod)
	framework.ExpectNoError(err, "Trying to create pod to run %q", command)
	By(fmt.Sprintf("Successfully created pod %q in namespace %q", pod.Name, f.Namespace.Name))
	defer func() {
		By(fmt.Sprintf("Deleting pod %q from namespace %q", podName, f.Namespace.Name))
		err := f.Client.Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0))
		framework.ExpectNoError(err, "Deleting pod %q from namespace %q", podName, f.Namespace.Name)
		By(fmt.Sprintf("Deleted pod %q from namespace %q", podName, f.Namespace.Name))
	}()

	if exists {
		// TODO(mml): Eventually check the IP address is correct, too.
		Eventually(podExitCodeDetector(f, podName, 0), 3*DNSTTL, time.Second*2).
			Should(BeNil(), "%q should exit 0, but it never did", command)
	} else {
		Eventually(podExitCodeDetector(f, podName, 0), 3*DNSTTL, time.Second*2).
			ShouldNot(BeNil(), "%q should eventually not exit 0, but it always did", command)
	}
}

/*
createBackendPodsOrFail creates one pod in each cluster, and returns the created pods (in the same order as clusterClientSets).
If creation of any pod fails, the test fails (possibly with a partially created set of pods). No retries are attempted.
*/
func createBackendPodsOrFail(clusters map[string]*cluster, namespace string, name string) {
	pod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			Name: name,
			// Namespace: namespace,
			Labels: FederatedServiceLabels,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  name,
					Image: "gcr.io/google_containers/echoserver:1.4",
				},
			},
			RestartPolicy: v1.RestartPolicyAlways,
		},
	}
	for name, c := range clusters {
		By(fmt.Sprintf("Creating pod %q in namespace %q in cluster %q", pod.Name, namespace, name))
		createdPod, err := c.Clientset.Core().Pods(namespace).Create(pod)
		framework.ExpectNoError(err, "Creating pod %q in namespace %q in cluster %q", name, namespace, name)
		By(fmt.Sprintf("Successfully created pod %q in namespace %q in cluster %q: %v", pod.Name, namespace, name, *createdPod))
		c.backendPod = createdPod
	}
}

/*
deleteOneBackendPodOrFail deletes exactly one backend pod which must not be nil
The test fails if there are any errors.
*/
func deleteOneBackendPodOrFail(c *cluster) {
	pod := c.backendPod
	Expect(pod).ToNot(BeNil())
	err := c.Clientset.Core().Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0))
	if errors.IsNotFound(err) {
		By(fmt.Sprintf("Pod %q in namespace %q in cluster %q does not exist.  No need to delete it.", pod.Name, pod.Namespace, c.name))
	} else {
		framework.ExpectNoError(err, "Deleting pod %q in namespace %q from cluster %q", pod.Name, pod.Namespace, c.name)
	}
	By(fmt.Sprintf("Backend pod %q in namespace %q in cluster %q deleted or does not exist", pod.Name, pod.Namespace, c.name))
}

/*
deleteBackendPodsOrFail deletes one pod from each cluster that has one.
If deletion of any pod fails, the test fails (possibly with a partially deleted set of pods). No retries are attempted.
*/
func deleteBackendPodsOrFail(clusters map[string]*cluster, namespace string) {
	for name, c := range clusters {
		if c.backendPod != nil {
			deleteOneBackendPodOrFail(c)
			c.backendPod = nil
		} else {
			By(fmt.Sprintf("No backend pod to delete for cluster %q", name))
		}
	}
}
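
The backend-pod helpers are meant to bracket a test in the same way. The create call below matches the federated-service BeforeEach earlier in this diff; pairing it with deleteBackendPodsOrFail in AfterEach is a sketch of the intended usage, not a line from this commit:

	BeforeEach(func() {
		framework.SkipUnlessFederated(f.Client)
		// One echoserver pod per cluster, labeled so the federated service selects it.
		createBackendPodsOrFail(clusters, f.Namespace.Name, FederatedServicePodName)
	})

	AfterEach(func() {
		// Deletes the recorded backend pod in each cluster; clusters without
		// one are skipped with a log line.
		deleteBackendPodsOrFail(clusters, f.Namespace.Name)
	})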
@@ -17,30 +17,13 @@ limitations under the License.
package e2e

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"time"

	compute "google.golang.org/api/compute/v1"
	"k8s.io/kubernetes/pkg/api"
	apierrs "k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/apis/extensions"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/runtime"
	utilexec "k8s.io/kubernetes/pkg/util/exec"
	"k8s.io/kubernetes/pkg/util/sets"
	"k8s.io/kubernetes/pkg/util/wait"
	utilyaml "k8s.io/kubernetes/pkg/util/yaml"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

const (
@@ -74,17 +57,12 @@ const (

var _ = framework.KubeDescribe("Loadbalancing: L7 [Feature:Ingress]", func() {
	defer GinkgoRecover()
	var (
		ns               string
		jig              *testJig
		conformanceTests []conformanceTests
	)
	f := framework.NewDefaultFramework("ingress")
	var ns string
	var jig *testJig
	manifestPath := filepath.Join(ingressManifestPath, "http")

	// These constants match the manifests used in ingressManifestPath
	tlsHost := "foo.bar.com"
	tlsSecretName := "foo"
	updatedTLSHost := "foobar.com"
	updateURLMapHost := "bar.baz.com"
	updateURLMapPath := "/testurl"

	BeforeEach(func() {
		f.BeforeEach()
@@ -92,79 +70,7 @@ var _ = framework.KubeDescribe("Loadbalancing: L7 [Feature:Ingress]", func() {
		ns = f.Namespace.Name
	})

	// Platform agnostic list of tests that must be satisfied by all controllers
	conformanceTests := []struct {
		entryLog string
		execute  func()
		exitLog  string
	}{
		{
			fmt.Sprintf("should create a basic HTTP ingress"),
			func() { jig.createIngress(manifestPath, ns, map[string]string{}) },
			fmt.Sprintf("waiting for urls on basic HTTP ingress"),
		},
		{
			fmt.Sprintf("should terminate TLS for host %v", tlsHost),
			func() { jig.addHTTPS(tlsSecretName, tlsHost) },
			fmt.Sprintf("waiting for HTTPS updates to reflect in ingress"),
		},
		{
			fmt.Sprintf("should update SSL certificated with modified hostname %v", updatedTLSHost),
 | 
			
		||||
			func() {
				jig.update(func(ing *extensions.Ingress) {
					newRules := []extensions.IngressRule{}
					for _, rule := range ing.Spec.Rules {
						if rule.Host != tlsHost {
							newRules = append(newRules, rule)
							continue
						}
						newRules = append(newRules, extensions.IngressRule{
							Host:             updatedTLSHost,
							IngressRuleValue: rule.IngressRuleValue,
						})
					}
					ing.Spec.Rules = newRules
				})
				jig.addHTTPS(tlsSecretName, updatedTLSHost)
			},
			fmt.Sprintf("Waiting for updated certificates to accept requests for host %v", updatedTLSHost),
		},
		{
			fmt.Sprintf("should update url map for host %v to expose a single url: %v", updateURLMapHost, updateURLMapPath),
			func() {
				var pathToFail string
				jig.update(func(ing *extensions.Ingress) {
					newRules := []extensions.IngressRule{}
					for _, rule := range ing.Spec.Rules {
						if rule.Host != updateURLMapHost {
							newRules = append(newRules, rule)
							continue
						}
						existingPath := rule.IngressRuleValue.HTTP.Paths[0]
						pathToFail = existingPath.Path
						newRules = append(newRules, extensions.IngressRule{
							Host: updateURLMapHost,
							IngressRuleValue: extensions.IngressRuleValue{
								HTTP: &extensions.HTTPIngressRuleValue{
									Paths: []extensions.HTTPIngressPath{
										{
											Path:    updateURLMapPath,
											Backend: existingPath.Backend,
										},
									},
								},
							},
						})
					}
					ing.Spec.Rules = newRules
				})
				By("Checking that " + pathToFail + " is not exposed by polling for failure")
				route := fmt.Sprintf("http://%v%v", jig.address, pathToFail)
				ExpectNoError(jig.pollURL(route, updateURLMapHost, lbCleanupTimeout, &http.Client{Timeout: reqTimeout}, true))
			},
			fmt.Sprintf("Waiting for path updates to reflect in L7"),
		},
	}
	conformanceTests = createComformanceTests(jig, ns)
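
Each conformance case bundles an entry log, an action, and an exit log. The driver loop lives outside this hunk, but a sketch of how such a table would be consumed looks like this (re-checking the ingress after each step is an assumption based on waitForIngress below):

	for _, t := range conformanceTests {
		By(t.entryLog)
		t.execute()
		By(t.exitLog)
		jig.waitForIngress() // every rule should still answer a simple GET
	}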

	// Before enabling this loadbalancer test in any other test list you must
	// make sure the associated project has enough quota. At the time of this
@@ -197,18 +103,7 @@ var _ = framework.KubeDescribe("Loadbalancing: L7 [Feature:Ingress]", func() {
			jig.deleteIngress()

			By("Cleaning up cloud resources")
			if pollErr := wait.Poll(5*time.Second, lbCleanupTimeout, func() (bool, error) {
				if err := gceController.Cleanup(false); err != nil {
					framework.Logf("Still waiting for glbc to cleanup: %v", err)
					return false, nil
				}
				return true, nil
			}); pollErr != nil {
				if cleanupErr := gceController.Cleanup(true); cleanupErr != nil {
					framework.Logf("WARNING: Failed to cleanup resources %v", cleanupErr)
				}
				framework.Failf("Failed to cleanup GCE L7 resources.")
			}
			cleanupGCE(gceController)
		})

		It("should conform to Ingress spec", func() {
@@ -251,412 +146,3 @@ var _ = framework.KubeDescribe("Loadbalancing: L7 [Feature:Ingress]", func() {
		// zone based on pod labels.
	})
})

func describeIng(ns string) {
	framework.Logf("\nOutput of kubectl describe ing:\n")
	desc, _ := framework.RunKubectl(
		"describe", "ing", fmt.Sprintf("--namespace=%v", ns))
	framework.Logf(desc)
}

func exists(path string) bool {
	_, err := os.Stat(path)
	if err == nil {
		return true
	}
	if os.IsNotExist(err) {
		return false
	}
	framework.Failf("Failed to os.Stat path %v", path)
	return false
}

func newTestJig(c *client.Client) *testJig {
	return &testJig{client: c, rootCAs: map[string][]byte{}}
}

type testJig struct {
	client  *client.Client
	rootCAs map[string][]byte
	address string
	ing     *extensions.Ingress
}

// createIngress creates the Ingress and associated service/rc.
// Required: ing.yaml, rc.yaml, svc.yaml must exist in manifestPath
// Optional: secret.yaml, ingAnnotations
// If ingAnnotations is specified it will overwrite any annotations in ing.yaml
func (j *testJig) createIngress(manifestPath, ns string, ingAnnotations map[string]string) {
	mkpath := func(file string) string {
		return filepath.Join(framework.TestContext.RepoRoot, manifestPath, file)
	}

	framework.Logf("creating replication controller")
	framework.RunKubectlOrDie("create", "-f", mkpath("rc.yaml"), fmt.Sprintf("--namespace=%v", ns))

	framework.Logf("creating service")
	framework.RunKubectlOrDie("create", "-f", mkpath("svc.yaml"), fmt.Sprintf("--namespace=%v", ns))

	if exists(mkpath("secret.yaml")) {
		framework.Logf("creating secret")
		framework.RunKubectlOrDie("create", "-f", mkpath("secret.yaml"), fmt.Sprintf("--namespace=%v", ns))
	}
	j.ing = ingFromManifest(mkpath("ing.yaml"))
	j.ing.Namespace = ns
	if len(ingAnnotations) != 0 {
		j.ing.Annotations = ingAnnotations
	}
	framework.Logf(fmt.Sprintf("creating " + j.ing.Name + " ingress"))
	var err error
	j.ing, err = j.client.Extensions().Ingress(ns).Create(j.ing)
	ExpectNoError(err)
}
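
End to end, the jig is used roughly as follows; this is a sketch assembled from the helpers in this file (c is the suite's *client.Client, tlsSecretName and tlsHost are the values declared in the Describe block above, and the manifest directory is assumed to contain ing.yaml, rc.yaml and svc.yaml):

	jig := newTestJig(c)
	jig.createIngress(filepath.Join(ingressManifestPath, "http"), ns, map[string]string{})
	defer jig.deleteIngress()

	// Terminate TLS for the host baked into the manifests, then wait until
	// every rule in the ingress answers a simple GET.
	jig.addHTTPS(tlsSecretName, tlsHost)
	jig.waitForIngress()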

func (j *testJig) update(update func(ing *extensions.Ingress)) {
	var err error
	ns, name := j.ing.Namespace, j.ing.Name
	for i := 0; i < 3; i++ {
		j.ing, err = j.client.Extensions().Ingress(ns).Get(name)
		if err != nil {
			framework.Failf("failed to get ingress %q: %v", name, err)
		}
		update(j.ing)
		j.ing, err = j.client.Extensions().Ingress(ns).Update(j.ing)
		if err == nil {
			describeIng(j.ing.Namespace)
			return
		}
		if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
			framework.Failf("failed to update ingress %q: %v", name, err)
		}
	}
	framework.Failf("too many retries updating ingress %q", name)
}

func (j *testJig) addHTTPS(secretName string, hosts ...string) {
	j.ing.Spec.TLS = []extensions.IngressTLS{{Hosts: hosts, SecretName: secretName}}
	// TODO: Just create the secret in getRootCAs once we're watching secrets in
	// the ingress controller.
	_, cert, _, err := createSecret(j.client, j.ing)
	ExpectNoError(err)
	framework.Logf("Updating ingress %v to use secret %v for TLS termination", j.ing.Name, secretName)
	j.update(func(ing *extensions.Ingress) {
		ing.Spec.TLS = []extensions.IngressTLS{{Hosts: hosts, SecretName: secretName}}
	})
	j.rootCAs[secretName] = cert
}

func (j *testJig) getRootCA(secretName string) (rootCA []byte) {
	var ok bool
	rootCA, ok = j.rootCAs[secretName]
	if !ok {
		framework.Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName)
	}
	return
}

func (j *testJig) deleteIngress() {
	ExpectNoError(j.client.Extensions().Ingress(j.ing.Namespace).Delete(j.ing.Name, nil))
}

func (j *testJig) waitForIngress() {
	// Wait for the loadbalancer IP.
	address, err := framework.WaitForIngressAddress(j.client, j.ing.Namespace, j.ing.Name, lbPollTimeout)
	if err != nil {
		framework.Failf("Ingress failed to acquire an IP address within %v", lbPollTimeout)
	}
	j.address = address
	framework.Logf("Found address %v for ingress %v", j.address, j.ing.Name)
	timeoutClient := &http.Client{Timeout: reqTimeout}

	// Check that all rules respond to a simple GET.
	for _, rules := range j.ing.Spec.Rules {
		proto := "http"
		if len(j.ing.Spec.TLS) > 0 {
			knownHosts := sets.NewString(j.ing.Spec.TLS[0].Hosts...)
			if knownHosts.Has(rules.Host) {
				timeoutClient.Transport, err = buildTransport(rules.Host, j.getRootCA(j.ing.Spec.TLS[0].SecretName))
				ExpectNoError(err)
				proto = "https"
			}
		}
		for _, p := range rules.IngressRuleValue.HTTP.Paths {
			j.curlServiceNodePort(j.ing.Namespace, p.Backend.ServiceName, int(p.Backend.ServicePort.IntVal))
			route := fmt.Sprintf("%v://%v%v", proto, address, p.Path)
			framework.Logf("Testing route %v host %v with simple GET", route, rules.Host)
			ExpectNoError(j.pollURL(route, rules.Host, lbPollTimeout, timeoutClient, false))
		}
	}
}

// verifyURL polls for the given iterations, in intervals, and fails if the
// given url returns a non-healthy http code even once.
func (j *testJig) verifyURL(route, host string, iterations int, interval time.Duration, httpClient *http.Client) error {
	for i := 0; i < iterations; i++ {
		b, err := simpleGET(httpClient, route, host)
		if err != nil {
			framework.Logf(b)
			return err
		}
		framework.Logf("Verified %v with host %v %d times, sleeping for %v", route, host, i, interval)
		time.Sleep(interval)
	}
	return nil
}

// pollURL polls till the url responds with a healthy http code. If
// expectUnreachable is true, it breaks on first non-healthy http code instead.
func (j *testJig) pollURL(route, host string, timeout time.Duration, httpClient *http.Client, expectUnreachable bool) error {
	var lastBody string
	pollErr := wait.PollImmediate(lbPollInterval, timeout, func() (bool, error) {
		var err error
		lastBody, err = simpleGET(httpClient, route, host)
		if err != nil {
			framework.Logf("host %v path %v: %v unreachable", host, route, err)
			return expectUnreachable, nil
		}
		return !expectUnreachable, nil
	})
	if pollErr != nil {
		return fmt.Errorf("Failed to execute a successful GET within %v, Last response body for %v, host %v:\n%v\n\n%v\n",
			timeout, route, host, lastBody, pollErr)
	}
	return nil
}

func (j *testJig) curlServiceNodePort(ns, name string, port int) {
	// TODO: Curl all nodes?
	u, err := framework.GetNodePortURL(j.client, ns, name, port)
	ExpectNoError(err)
	ExpectNoError(j.pollURL(u, "", 30*time.Second, &http.Client{Timeout: reqTimeout}, false))
}

// ingFromManifest reads a .json/yaml file and returns the ingress in it.
func ingFromManifest(fileName string) *extensions.Ingress {
	var ing extensions.Ingress
	framework.Logf("Parsing ingress from %v", fileName)
	data, err := ioutil.ReadFile(fileName)
	ExpectNoError(err)

	json, err := utilyaml.ToJSON(data)
	ExpectNoError(err)

	ExpectNoError(runtime.DecodeInto(api.Codecs.UniversalDecoder(), json, &ing))
	return &ing
}

// gcloudList unmarshals json output of gcloud into given out interface.
func gcloudList(resource, regex, project string, out interface{}) {
	// gcloud prints a message to stderr if it has an available update
	// so we only look at stdout.
	command := []string{
		"compute", resource, "list",
		fmt.Sprintf("--regex=%v", regex),
		fmt.Sprintf("--project=%v", project),
		"-q", "--format=json",
	}
	output, err := exec.Command("gcloud", command...).Output()
	if err != nil {
		errCode := -1
		if exitErr, ok := err.(utilexec.ExitError); ok {
			errCode = exitErr.ExitStatus()
		}
		framework.Logf("Error running gcloud command 'gcloud %s': err: %v, output: %v, status: %d", strings.Join(command, " "), err, string(output), errCode)
	}
	if err := json.Unmarshal([]byte(output), out); err != nil {
		framework.Logf("Error unmarshalling gcloud output for %v: %v, output: %v", resource, err, string(output))
	}
}

func gcloudDelete(resource, name, project string, args ...string) error {
	framework.Logf("Deleting %v: %v", resource, name)
	argList := append([]string{"compute", resource, "delete", name, fmt.Sprintf("--project=%v", project), "-q"}, args...)
	output, err := exec.Command("gcloud", argList...).CombinedOutput()
	if err != nil {
		framework.Logf("Error deleting %v, output: %v\nerror: %+v", resource, string(output), err)
	}
	return err
}

func gcloudCreate(resource, name, project string, args ...string) error {
	framework.Logf("Creating %v in project %v: %v", resource, project, name)
	argsList := append([]string{"compute", resource, "create", name, fmt.Sprintf("--project=%v", project)}, args...)
	output, err := exec.Command("gcloud", argsList...).CombinedOutput()
	if err != nil {
		framework.Logf("Error creating %v, output: %v\nerror: %+v", resource, string(output), err)
	}
	return err
}

// GCEIngressController manages implementation details of Ingress on GCE/GKE.
type GCEIngressController struct {
	ns           string
	rcPath       string
	UID          string
	Project      string
	staticIPName string
	rc           *api.ReplicationController
	svc          *api.Service
	c            *client.Client
}

func (cont *GCEIngressController) getL7AddonUID() (string, error) {
	framework.Logf("Retrieving UID from config map: %v/%v", api.NamespaceSystem, uidConfigMap)
	cm, err := cont.c.ConfigMaps(api.NamespaceSystem).Get(uidConfigMap)
	if err != nil {
		return "", err
	}
	if uid, ok := cm.Data[uidKey]; ok {
		return uid, nil
	}
	return "", fmt.Errorf("Could not find cluster UID for L7 addon pod")
}

func (cont *GCEIngressController) init() {
	uid, err := cont.getL7AddonUID()
	Expect(err).NotTo(HaveOccurred())
	cont.UID = uid
	// There's a name limit imposed by GCE. The controller will truncate.
	testName := fmt.Sprintf("k8s-fw-foo-app-X-%v--%v", cont.ns, cont.UID)
	if len(testName) > nameLenLimit {
		framework.Logf("WARNING: test name including cluster UID: %v is over the GCE limit of %v", testName, nameLenLimit)
	} else {
		framework.Logf("Detected cluster UID %v", cont.UID)
	}
}

func (cont *GCEIngressController) staticIP(name string) string {
	ExpectNoError(gcloudCreate("addresses", name, cont.Project, "--global"))
	cont.staticIPName = name
	ipList := []compute.Address{}
	if pollErr := wait.PollImmediate(5*time.Second, cloudResourcePollTimeout, func() (bool, error) {
		gcloudList("addresses", name, cont.Project, &ipList)
		if len(ipList) != 1 {
			framework.Logf("Failed to find static ip %v even though create call succeeded, found ips %+v", name, ipList)
			return false, nil
		}
		return true, nil
	}); pollErr != nil {
		if err := gcloudDelete("addresses", name, cont.Project, "--global"); err != nil {
			framework.Logf("Failed to get AND delete address %v even though create call succeeded", name)
		}
		framework.Failf("Failed to find static ip %v even though create call succeeded, found ips %+v", name, ipList)
	}
	return ipList[0].Address
}

// Cleanup cleans up cloud resources.
// If del is false, it simply reports existing resources without deleting them.
// It always deletes resources created through its methods, like staticIP, even
// if del is false.
func (cont *GCEIngressController) Cleanup(del bool) error {
	errMsg := ""
	// Ordering is important here because we cannot delete resources that other
	// resources hold references to.
	fwList := []compute.ForwardingRule{}
	for _, regex := range []string{fmt.Sprintf("k8s-fw-.*--%v", cont.UID), fmt.Sprintf("k8s-fws-.*--%v", cont.UID)} {
		gcloudList("forwarding-rules", regex, cont.Project, &fwList)
		if len(fwList) != 0 {
			msg := ""
			for _, f := range fwList {
				msg += fmt.Sprintf("%v\n", f.Name)
				if del {
					gcloudDelete("forwarding-rules", f.Name, cont.Project, "--global")
				}
			}
			errMsg += fmt.Sprintf("\nFound forwarding rules:\n%v", msg)
		}
	}
	// Static IPs are named after forwarding rules.
	ipList := []compute.Address{}
	gcloudList("addresses", fmt.Sprintf("k8s-fw-.*--%v", cont.UID), cont.Project, &ipList)
	if len(ipList) != 0 {
		msg := ""
		for _, ip := range ipList {
			msg += fmt.Sprintf("%v\n", ip.Name)
			if del {
				gcloudDelete("addresses", ip.Name, cont.Project)
			}
		}
		errMsg += fmt.Sprintf("Found addresses:\n%v", msg)
	}

	// If the test allocated a static ip, delete that regardless
	if cont.staticIPName != "" {
		if err := gcloudDelete("addresses", cont.staticIPName, cont.Project, "--global"); err == nil {
			cont.staticIPName = ""
		}
	}

	tpList := []compute.TargetHttpProxy{}
	gcloudList("target-http-proxies", fmt.Sprintf("k8s-tp-.*--%v", cont.UID), cont.Project, &tpList)
	if len(tpList) != 0 {
		msg := ""
		for _, t := range tpList {
			msg += fmt.Sprintf("%v\n", t.Name)
			if del {
				gcloudDelete("target-http-proxies", t.Name, cont.Project)
			}
		}
		errMsg += fmt.Sprintf("Found target proxies:\n%v", msg)
	}
	tpsList := []compute.TargetHttpsProxy{}
	gcloudList("target-https-proxies", fmt.Sprintf("k8s-tps-.*--%v", cont.UID), cont.Project, &tpsList)
	if len(tpsList) != 0 {
		msg := ""
		for _, t := range tpsList {
			msg += fmt.Sprintf("%v\n", t.Name)
			if del {
				gcloudDelete("target-https-proxies", t.Name, cont.Project)
			}
		}
		errMsg += fmt.Sprintf("Found target HTTPS proxies:\n%v", msg)
	}
	// TODO: Check for leaked ssl certs.

	umList := []compute.UrlMap{}
	gcloudList("url-maps", fmt.Sprintf("k8s-um-.*--%v", cont.UID), cont.Project, &umList)
	if len(umList) != 0 {
		msg := ""
		for _, u := range umList {
			msg += fmt.Sprintf("%v\n", u.Name)
			if del {
				gcloudDelete("url-maps", u.Name, cont.Project)
			}
		}
		errMsg += fmt.Sprintf("Found url maps:\n%v", msg)
	}

	beList := []compute.BackendService{}
	gcloudList("backend-services", fmt.Sprintf("k8s-be-[0-9]+--%v", cont.UID), cont.Project, &beList)
	if len(beList) != 0 {
		msg := ""
		for _, b := range beList {
			msg += fmt.Sprintf("%v\n", b.Name)
			if del {
				gcloudDelete("backend-services", b.Name, cont.Project)
			}
		}
		errMsg += fmt.Sprintf("Found backend services:\n%v", msg)
	}

	hcList := []compute.HttpHealthCheck{}
	gcloudList("http-health-checks", fmt.Sprintf("k8s-be-[0-9]+--%v", cont.UID), cont.Project, &hcList)
	if len(hcList) != 0 {
		msg := ""
		for _, h := range hcList {
			msg += fmt.Sprintf("%v\n", h.Name)
			if del {
				gcloudDelete("http-health-checks", h.Name, cont.Project)
			}
		}
		errMsg += fmt.Sprintf("Found health check:\n%v", msg)
	}
	// TODO: Verify instance-groups, issue #16636. Gcloud mysteriously barfs when told
	// to unmarshal instance groups into the current vendored gce-client's understanding
	// of the struct.
	if errMsg == "" {
		return nil
	}
	return fmt.Errorf(errMsg)
}

@@ -23,20 +23,35 @@ import (
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/json"
	"encoding/pem"
	"fmt"
	"io"
	"io/ioutil"
	"math/big"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"time"

	"k8s.io/kubernetes/pkg/api"

	compute "google.golang.org/api/compute/v1"
	apierrs "k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/apis/extensions"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/runtime"
	utilexec "k8s.io/kubernetes/pkg/util/exec"
	utilnet "k8s.io/kubernetes/pkg/util/net"
	"k8s.io/kubernetes/pkg/util/sets"
	"k8s.io/kubernetes/pkg/util/wait"
	utilyaml "k8s.io/kubernetes/pkg/util/yaml"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/gomega"
)

const (
@@ -44,6 +59,118 @@ const (
	validFor = 365 * 24 * time.Hour
)

type testJig struct {
	client  *client.Client
	rootCAs map[string][]byte
	address string
	ing     *extensions.Ingress
}

type conformanceTests struct {
	entryLog string
	execute  func()
	exitLog  string
}

func createComformanceTests(jig *testJig, ns string) []conformanceTests {
	manifestPath := filepath.Join(ingressManifestPath, "http")
	// These constants match the manifests used in ingressManifestPath
	tlsHost := "foo.bar.com"
	tlsSecretName := "foo"
	updatedTLSHost := "foobar.com"
	updateURLMapHost := "bar.baz.com"
	updateURLMapPath := "/testurl"
	// Platform agnostic list of tests that must be satisfied by all controllers
	return []conformanceTests{
		{
			"should create a basic HTTP ingress",
			func() { jig.createIngress(manifestPath, ns, map[string]string{}) },
			"waiting for urls on basic HTTP ingress",
		},
		{
			fmt.Sprintf("should terminate TLS for host %v", tlsHost),
			func() { jig.addHTTPS(tlsSecretName, tlsHost) },
			"waiting for HTTPS updates to reflect in ingress",
		},
		{
			fmt.Sprintf("should update SSL certificate with modified hostname %v", updatedTLSHost),
			func() {
				jig.update(func(ing *extensions.Ingress) {
					newRules := []extensions.IngressRule{}
					for _, rule := range ing.Spec.Rules {
						if rule.Host != tlsHost {
							newRules = append(newRules, rule)
							continue
						}
						newRules = append(newRules, extensions.IngressRule{
							Host:             updatedTLSHost,
							IngressRuleValue: rule.IngressRuleValue,
						})
					}
					ing.Spec.Rules = newRules
				})
				jig.addHTTPS(tlsSecretName, updatedTLSHost)
			},
			fmt.Sprintf("Waiting for updated certificates to accept requests for host %v", updatedTLSHost),
		},
		{
			fmt.Sprintf("should update url map for host %v to expose a single url: %v", updateURLMapHost, updateURLMapPath),
			func() {
				var pathToFail string
				jig.update(func(ing *extensions.Ingress) {
					newRules := []extensions.IngressRule{}
					for _, rule := range ing.Spec.Rules {
						if rule.Host != updateURLMapHost {
							newRules = append(newRules, rule)
							continue
						}
						existingPath := rule.IngressRuleValue.HTTP.Paths[0]
						pathToFail = existingPath.Path
						newRules = append(newRules, extensions.IngressRule{
							Host: updateURLMapHost,
							IngressRuleValue: extensions.IngressRuleValue{
								HTTP: &extensions.HTTPIngressRuleValue{
									Paths: []extensions.HTTPIngressPath{
										{
											Path:    updateURLMapPath,
											Backend: existingPath.Backend,
										},
									},
								},
							},
						})
					}
					ing.Spec.Rules = newRules
				})
				framework.Logf("Checking that %s is not exposed by polling for failure", pathToFail)
				route := fmt.Sprintf("http://%v%v", jig.address, pathToFail)
				ExpectNoError(jig.pollURL(route, updateURLMapHost, lbCleanupTimeout, &http.Client{Timeout: reqTimeout}, true))
			},
			"Waiting for path updates to reflect in L7",
		},
	}
}
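
// Hypothetical usage sketch, not part of the original change: a driver that
// walks the conformance list end to end, logging the entry and exit messages
// around each step the way the ingress tests are expected to.
func exampleConformanceRun(jig *testJig, ns string) {
	for _, t := range createComformanceTests(jig, ns) {
		framework.Logf("%v", t.entryLog)
		t.execute()
		framework.Logf("%v", t.exitLog)
		// Each mutation should converge before the next one is applied.
		jig.waitForIngress()
	}
}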

// pollURL polls till the url responds with a healthy http code. If
// expectUnreachable is true, it breaks on first non-healthy http code instead.
func (j *testJig) pollURL(route, host string, timeout time.Duration, httpClient *http.Client, expectUnreachable bool) error {
	var lastBody string
	pollErr := wait.PollImmediate(lbPollInterval, timeout, func() (bool, error) {
		var err error
		lastBody, err = simpleGET(httpClient, route, host)
		if err != nil {
			framework.Logf("host %v path %v: %v unreachable", host, route, err)
			return expectUnreachable, nil
		}
		return !expectUnreachable, nil
	})
	if pollErr != nil {
		return fmt.Errorf("Failed to execute a successful GET within %v, Last response body for %v, host %v:\n%v\n\n%v\n",
			timeout, route, host, lastBody, pollErr)
	}
	return nil
}
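
// Hypothetical usage sketch, not part of the original change: the same
// pollURL serves both polarities. With expectUnreachable=false it waits for a
// route to come up; with true it only succeeds while the GET keeps failing,
// which is how removed paths are verified. Host and paths are illustrative.
func examplePollURL(jig *testJig) {
	up := fmt.Sprintf("http://%v/", jig.address)
	ExpectNoError(jig.pollURL(up, "foo.bar.com", lbPollTimeout, &http.Client{Timeout: reqTimeout}, false))
	gone := fmt.Sprintf("http://%v/removed", jig.address)
	ExpectNoError(jig.pollURL(gone, "foo.bar.com", lbPollTimeout, &http.Client{Timeout: reqTimeout}, true))
}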

// generateRSACerts generates a basic self signed certificate using a key length
// of rsaBits, valid for validFor time.
func generateRSACerts(host string, isCA bool, keyOut, certOut io.Writer) error {
@@ -162,3 +289,431 @@ func createSecret(kubeClient *client.Client, ing *extensions.Ingress) (host stri
	}
	return host, cert, key, err
}

func describeIng(ns string) {
	framework.Logf("\nOutput of kubectl describe ing:\n")
	desc, _ := framework.RunKubectl(
		"describe", "ing", fmt.Sprintf("--namespace=%v", ns))
	framework.Logf(desc)
}

func cleanupGCE(gceController *GCEIngressController) {
	if pollErr := wait.Poll(5*time.Second, lbCleanupTimeout, func() (bool, error) {
		if err := gceController.Cleanup(false); err != nil {
			framework.Logf("Still waiting for glbc to cleanup: %v", err)
			return false, nil
		}
		return true, nil
	}); pollErr != nil {
		if cleanupErr := gceController.Cleanup(true); cleanupErr != nil {
			framework.Logf("WARNING: Failed to cleanup resources %v", cleanupErr)
		}
		framework.Failf("Failed to cleanup GCE L7 resources.")
	}
}

func (cont *GCEIngressController) deleteForwardingRule(del bool) string {
	msg := ""
	fwList := []compute.ForwardingRule{}
	for _, regex := range []string{fmt.Sprintf("k8s-fw-.*--%v", cont.UID), fmt.Sprintf("k8s-fws-.*--%v", cont.UID)} {
		gcloudList("forwarding-rules", regex, cont.Project, &fwList)
		if len(fwList) != 0 {
			names := ""
			for _, f := range fwList {
				names += fmt.Sprintf("%v\n", f.Name)
				if del {
					gcloudDelete("forwarding-rules", f.Name, cont.Project, "--global")
				}
			}
			msg += fmt.Sprintf("\nFound forwarding rules:\n%v", names)
		}
	}
	return msg
}

func (cont *GCEIngressController) deleteAddresses(del bool) string {
	msg := ""
	ipList := []compute.Address{}
	gcloudList("addresses", fmt.Sprintf("k8s-fw-.*--%v", cont.UID), cont.Project, &ipList)
	if len(ipList) != 0 {
		names := ""
		for _, ip := range ipList {
			names += fmt.Sprintf("%v\n", ip.Name)
			if del {
				gcloudDelete("addresses", ip.Name, cont.Project)
			}
		}
		msg += fmt.Sprintf("Found addresses:\n%v", names)
	}
	// If the test allocated a static ip, delete that regardless
	if cont.staticIPName != "" {
		if err := gcloudDelete("addresses", cont.staticIPName, cont.Project, "--global"); err == nil {
			cont.staticIPName = ""
		}
	}
	return msg
}

func (cont *GCEIngressController) deleteTargetProxy(del bool) string {
	msg := ""
	tpList := []compute.TargetHttpProxy{}
	gcloudList("target-http-proxies", fmt.Sprintf("k8s-tp-.*--%v", cont.UID), cont.Project, &tpList)
	if len(tpList) != 0 {
		names := ""
		for _, t := range tpList {
			names += fmt.Sprintf("%v\n", t.Name)
			if del {
				gcloudDelete("target-http-proxies", t.Name, cont.Project)
			}
		}
		msg += fmt.Sprintf("Found target proxies:\n%v", names)
	}
	tpsList := []compute.TargetHttpsProxy{}
	gcloudList("target-https-proxies", fmt.Sprintf("k8s-tps-.*--%v", cont.UID), cont.Project, &tpsList)
	if len(tpsList) != 0 {
		names := ""
		for _, t := range tpsList {
			names += fmt.Sprintf("%v\n", t.Name)
			if del {
				gcloudDelete("target-https-proxies", t.Name, cont.Project)
			}
		}
		msg += fmt.Sprintf("Found target HTTPS proxies:\n%v", names)
	}
	return msg
}

func (cont *GCEIngressController) deleteUrlMap(del bool) string {
	msg := ""
	umList := []compute.UrlMap{}
	gcloudList("url-maps", fmt.Sprintf("k8s-um-.*--%v", cont.UID), cont.Project, &umList)
	if len(umList) != 0 {
		names := ""
		for _, u := range umList {
			names += fmt.Sprintf("%v\n", u.Name)
			if del {
				gcloudDelete("url-maps", u.Name, cont.Project)
			}
		}
		msg += fmt.Sprintf("Found url maps:\n%v", names)
	}
	return msg
}

func (cont *GCEIngressController) deleteBackendService(del bool) string {
	msg := ""
	beList := []compute.BackendService{}
	gcloudList("backend-services", fmt.Sprintf("k8s-be-[0-9]+--%v", cont.UID), cont.Project, &beList)
	if len(beList) != 0 {
		names := ""
		for _, b := range beList {
			names += fmt.Sprintf("%v\n", b.Name)
			if del {
				gcloudDelete("backend-services", b.Name, cont.Project)
			}
		}
		msg += fmt.Sprintf("Found backend services:\n%v", names)
	}
	return msg
}

func (cont *GCEIngressController) deleteHttpHealthCheck(del bool) string {
	msg := ""
	hcList := []compute.HttpHealthCheck{}
	gcloudList("http-health-checks", fmt.Sprintf("k8s-be-[0-9]+--%v", cont.UID), cont.Project, &hcList)
	if len(hcList) != 0 {
		names := ""
		for _, h := range hcList {
			names += fmt.Sprintf("%v\n", h.Name)
			if del {
				gcloudDelete("http-health-checks", h.Name, cont.Project)
			}
		}
		msg += fmt.Sprintf("Found health check:\n%v", names)
	}
	return msg
}

// Cleanup cleans up cloud resources.
// If del is false, it simply reports existing resources without deleting them.
// It always deletes resources created through its methods, like staticIP, even
// if del is false.
func (cont *GCEIngressController) Cleanup(del bool) error {
	// Ordering is important here because we cannot delete resources that other
	// resources hold references to.
	errMsg := cont.deleteForwardingRule(del)
	// Static IPs are named after forwarding rules.
	errMsg += cont.deleteAddresses(del)
	// TODO: Check for leaked ssl certs.

	errMsg += cont.deleteTargetProxy(del)
	errMsg += cont.deleteUrlMap(del)
	errMsg += cont.deleteBackendService(del)
	errMsg += cont.deleteHttpHealthCheck(del)

	// TODO: Verify instance-groups, issue #16636. Gcloud mysteriously barfs when told
	// to unmarshal instance groups into the current vendored gce-client's understanding
	// of the struct.
	if errMsg == "" {
		return nil
	}
	return fmt.Errorf(errMsg)
}
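
// Hypothetical usage sketch, not part of the original change: report leaked
// resources first, then force-delete; this mirrors how cleanupGCE drives
// Cleanup with del=false before falling back to del=true.
func exampleCleanup(cont *GCEIngressController) {
	if err := cont.Cleanup(false); err != nil {
		framework.Logf("Leaked GCE resources:\n%v", err)
		ExpectNoError(cont.Cleanup(true))
	}
}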

func (cont *GCEIngressController) init() {
	uid, err := cont.getL7AddonUID()
	Expect(err).NotTo(HaveOccurred())
	cont.UID = uid
	// There's a name limit imposed by GCE. The controller will truncate.
	testName := fmt.Sprintf("k8s-fw-foo-app-X-%v--%v", cont.ns, cont.UID)
	if len(testName) > nameLenLimit {
		framework.Logf("WARNING: test name including cluster UID: %v is over the GCE limit of %v", testName, nameLenLimit)
	} else {
		framework.Logf("Detected cluster UID %v", cont.UID)
	}
}

func (cont *GCEIngressController) staticIP(name string) string {
	ExpectNoError(gcloudCreate("addresses", name, cont.Project, "--global"))
	cont.staticIPName = name
	ipList := []compute.Address{}
	if pollErr := wait.PollImmediate(5*time.Second, cloudResourcePollTimeout, func() (bool, error) {
		gcloudList("addresses", name, cont.Project, &ipList)
		if len(ipList) != 1 {
			framework.Logf("Failed to find static ip %v even though create call succeeded, found ips %+v", name, ipList)
			return false, nil
		}
		return true, nil
	}); pollErr != nil {
		if err := gcloudDelete("addresses", name, cont.Project, "--global"); err != nil {
			framework.Logf("Failed to get AND delete address %v even though create call succeeded", name)
		}
		framework.Failf("Failed to find static ip %v even though create call succeeded, found ips %+v", name, ipList)
	}
	return ipList[0].Address
}
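
// Hypothetical usage sketch, not part of the original change: reserve a
// global static IP and point the ingress at it. The annotation key is an
// assumption about the GCE ingress controller, not something this file
// defines; Cleanup releases the address afterwards.
func exampleStaticIP(cont *GCEIngressController, jig *testJig, ns string) {
	ip := cont.staticIP("e2e-static-ip")
	framework.Logf("Reserved static ip %v", ip)
	jig.createIngress(filepath.Join(ingressManifestPath, "http"), ns, map[string]string{
		"kubernetes.io/ingress.global-static-ip-name": "e2e-static-ip", // assumed annotation key
	})
}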

// gcloudList unmarshals json output of gcloud into given out interface.
func gcloudList(resource, regex, project string, out interface{}) {
	// gcloud prints a message to stderr if it has an available update
	// so we only look at stdout.
	command := []string{
		"compute", resource, "list",
		fmt.Sprintf("--regex=%v", regex),
		fmt.Sprintf("--project=%v", project),
		"-q", "--format=json",
	}
	output, err := exec.Command("gcloud", command...).Output()
	if err != nil {
		errCode := -1
		if exitErr, ok := err.(utilexec.ExitError); ok {
			errCode = exitErr.ExitStatus()
		}
		framework.Logf("Error running gcloud command 'gcloud %s': err: %v, output: %v, status: %d", strings.Join(command, " "), err, string(output), errCode)
	}
	if err := json.Unmarshal([]byte(output), out); err != nil {
		framework.Logf("Error unmarshalling gcloud output for %v: %v, output: %v", resource, err, string(output))
	}
}
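
// Hypothetical usage sketch, not part of the original change: list addresses
// whose names match a cluster-UID-scoped regex and unmarshal them into the
// vendored compute types, as the delete* helpers above do.
func exampleGcloudList(project, uid string) {
	addrs := []compute.Address{}
	gcloudList("addresses", fmt.Sprintf("k8s-fw-.*--%v", uid), project, &addrs)
	for _, a := range addrs {
		framework.Logf("found address %v (%v)", a.Name, a.Address)
	}
}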

func gcloudDelete(resource, name, project string, args ...string) error {
	framework.Logf("Deleting %v: %v", resource, name)
	argList := append([]string{"compute", resource, "delete", name, fmt.Sprintf("--project=%v", project), "-q"}, args...)
	output, err := exec.Command("gcloud", argList...).CombinedOutput()
	if err != nil {
		framework.Logf("Error deleting %v, output: %v\nerror: %+v", resource, string(output), err)
	}
	return err
}

func gcloudCreate(resource, name, project string, args ...string) error {
	framework.Logf("Creating %v in project %v: %v", resource, project, name)
	argsList := append([]string{"compute", resource, "create", name, fmt.Sprintf("--project=%v", project)}, args...)
	output, err := exec.Command("gcloud", argsList...).CombinedOutput()
	if err != nil {
		framework.Logf("Error creating %v, output: %v\nerror: %+v", resource, string(output), err)
	}
	return err
}

// createIngress creates the Ingress and associated service/rc.
// Required: ing.yaml, rc.yaml, svc.yaml must exist in manifestPath
// Optional: secret.yaml, ingAnnotations
// If ingAnnotations is specified it will overwrite any annotations in ing.yaml
func (j *testJig) createIngress(manifestPath, ns string, ingAnnotations map[string]string) {
	mkpath := func(file string) string {
		return filepath.Join(framework.TestContext.RepoRoot, manifestPath, file)
	}

	framework.Logf("creating replication controller")
	framework.RunKubectlOrDie("create", "-f", mkpath("rc.yaml"), fmt.Sprintf("--namespace=%v", ns))

	framework.Logf("creating service")
	framework.RunKubectlOrDie("create", "-f", mkpath("svc.yaml"), fmt.Sprintf("--namespace=%v", ns))

	if exists(mkpath("secret.yaml")) {
		framework.Logf("creating secret")
		framework.RunKubectlOrDie("create", "-f", mkpath("secret.yaml"), fmt.Sprintf("--namespace=%v", ns))
	}
	j.ing = ingFromManifest(mkpath("ing.yaml"))
	j.ing.Namespace = ns
	if len(ingAnnotations) != 0 {
		j.ing.Annotations = ingAnnotations
	}
	framework.Logf("creating ingress %v", j.ing.Name)
	var err error
	j.ing, err = j.client.Extensions().Ingress(ns).Create(j.ing)
	ExpectNoError(err)
}
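
// Hypothetical usage sketch, not part of the original change: stand up the
// standard http manifests in a namespace. The annotation key passed here is
// illustrative only; passing a non-empty map replaces whatever ing.yaml set.
func exampleCreateIngress(jig *testJig, ns string) {
	jig.createIngress(filepath.Join(ingressManifestPath, "http"), ns, map[string]string{
		"kubernetes.io/ingress.class": "gce", // assumed annotation key
	})
	jig.waitForIngress()
}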

func (j *testJig) update(update func(ing *extensions.Ingress)) {
	var err error
	ns, name := j.ing.Namespace, j.ing.Name
	for i := 0; i < 3; i++ {
		j.ing, err = j.client.Extensions().Ingress(ns).Get(name)
		if err != nil {
			framework.Failf("failed to get ingress %q: %v", name, err)
		}
		update(j.ing)
		j.ing, err = j.client.Extensions().Ingress(ns).Update(j.ing)
		if err == nil {
			describeIng(j.ing.Namespace)
			return
		}
		if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
			framework.Failf("failed to update ingress %q: %v", name, err)
		}
	}
	framework.Failf("too many retries updating ingress %q", name)
}
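
// Hypothetical usage sketch, not part of the original change: because update
// re-reads the ingress before each attempt, mutations must be applied to the
// object handed to the callback, never to a stale copy. The annotation key is
// illustrative.
func exampleUpdate(jig *testJig) {
	jig.update(func(ing *extensions.Ingress) {
		if ing.Annotations == nil {
			ing.Annotations = map[string]string{}
		}
		ing.Annotations["e2e-touched"] = "true" // assumed, illustrative key
	})
}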

func (j *testJig) addHTTPS(secretName string, hosts ...string) {
	j.ing.Spec.TLS = []extensions.IngressTLS{{Hosts: hosts, SecretName: secretName}}
	// TODO: Just create the secret in getRootCAs once we're watching secrets in
	// the ingress controller.
	_, cert, _, err := createSecret(j.client, j.ing)
	ExpectNoError(err)
	framework.Logf("Updating ingress %v to use secret %v for TLS termination", j.ing.Name, secretName)
	j.update(func(ing *extensions.Ingress) {
		ing.Spec.TLS = []extensions.IngressTLS{{Hosts: hosts, SecretName: secretName}}
	})
	j.rootCAs[secretName] = cert
}

func (j *testJig) getRootCA(secretName string) (rootCA []byte) {
	var ok bool
	rootCA, ok = j.rootCAs[secretName]
	if !ok {
		framework.Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName)
	}
	return
}

func (j *testJig) deleteIngress() {
	ExpectNoError(j.client.Extensions().Ingress(j.ing.Namespace).Delete(j.ing.Name, nil))
}

func (j *testJig) waitForIngress() {
	// Wait for the loadbalancer IP.
	address, err := framework.WaitForIngressAddress(j.client, j.ing.Namespace, j.ing.Name, lbPollTimeout)
	if err != nil {
		framework.Failf("Ingress failed to acquire an IP address within %v", lbPollTimeout)
	}
	j.address = address
	framework.Logf("Found address %v for ingress %v", j.address, j.ing.Name)
	timeoutClient := &http.Client{Timeout: reqTimeout}

	// Check that all rules respond to a simple GET.
	for _, rules := range j.ing.Spec.Rules {
		proto := "http"
		if len(j.ing.Spec.TLS) > 0 {
			knownHosts := sets.NewString(j.ing.Spec.TLS[0].Hosts...)
			if knownHosts.Has(rules.Host) {
				timeoutClient.Transport, err = buildTransport(rules.Host, j.getRootCA(j.ing.Spec.TLS[0].SecretName))
				ExpectNoError(err)
				proto = "https"
			}
		}
		for _, p := range rules.IngressRuleValue.HTTP.Paths {
			j.curlServiceNodePort(j.ing.Namespace, p.Backend.ServiceName, int(p.Backend.ServicePort.IntVal))
			route := fmt.Sprintf("%v://%v%v", proto, address, p.Path)
			framework.Logf("Testing route %v host %v with simple GET", route, rules.Host)
			ExpectNoError(j.pollURL(route, rules.Host, lbPollTimeout, timeoutClient, false))
		}
	}
}

// verifyURL polls for the given iterations, in intervals, and fails if the
// given url returns a non-healthy http code even once.
func (j *testJig) verifyURL(route, host string, iterations int, interval time.Duration, httpClient *http.Client) error {
	for i := 0; i < iterations; i++ {
		b, err := simpleGET(httpClient, route, host)
		if err != nil {
			framework.Logf(b)
			return err
		}
		framework.Logf("Verified %v with host %v %d times, sleeping for %v", route, host, i, interval)
		time.Sleep(interval)
	}
	return nil
}
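
// Hypothetical usage sketch, not part of the original change: verifyURL is
// the soak-test complement of pollURL; use it after an update to assert a
// route never regresses during the window. Host and counts are illustrative.
func exampleVerifyURL(jig *testJig) {
	route := fmt.Sprintf("http://%v/", jig.address)
	ExpectNoError(jig.verifyURL(route, "foo.bar.com", 30, 1*time.Second, &http.Client{Timeout: reqTimeout}))
}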

func (j *testJig) curlServiceNodePort(ns, name string, port int) {
	// TODO: Curl all nodes?
	u, err := framework.GetNodePortURL(j.client, ns, name, port)
	ExpectNoError(err)
	ExpectNoError(j.pollURL(u, "", 30*time.Second, &http.Client{Timeout: reqTimeout}, false))
}

// ingFromManifest reads a .json/yaml file and returns the ingress in it.
func ingFromManifest(fileName string) *extensions.Ingress {
	var ing extensions.Ingress
	framework.Logf("Parsing ingress from %v", fileName)
	data, err := ioutil.ReadFile(fileName)
	ExpectNoError(err)

	json, err := utilyaml.ToJSON(data)
	ExpectNoError(err)

	ExpectNoError(runtime.DecodeInto(api.Codecs.UniversalDecoder(), json, &ing))
	return &ing
}
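
// Hypothetical usage sketch, not part of the original change: parse a
// manifest and inspect it before creating anything; the path is illustrative.
func exampleIngFromManifest() {
	ing := ingFromManifest(filepath.Join(ingressManifestPath, "http", "ing.yaml"))
	framework.Logf("manifest declares %d rule(s) for ingress %v", len(ing.Spec.Rules), ing.Name)
}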

func (cont *GCEIngressController) getL7AddonUID() (string, error) {
	framework.Logf("Retrieving UID from config map: %v/%v", api.NamespaceSystem, uidConfigMap)
	cm, err := cont.c.ConfigMaps(api.NamespaceSystem).Get(uidConfigMap)
	if err != nil {
		return "", err
	}
	if uid, ok := cm.Data[uidKey]; ok {
		return uid, nil
	}
	return "", fmt.Errorf("Could not find cluster UID for L7 addon pod")
}

func exists(path string) bool {
	_, err := os.Stat(path)
	if err == nil {
		return true
	}
	if os.IsNotExist(err) {
		return false
	}
	framework.Failf("Failed to os.Stat path %v", path)
	return false
}

// GCEIngressController manages implementation details of Ingress on GCE/GKE.
type GCEIngressController struct {
	ns           string
	rcPath       string
	UID          string
	Project      string
	staticIPName string
	rc           *api.ReplicationController
	svc          *api.Service
	c            *client.Client
}

func newTestJig(c *client.Client) *testJig {
	return &testJig{client: c, rootCAs: map[string][]byte{}}
}
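
// Hypothetical usage sketch, not part of the original change: the full jig
// lifecycle a test would run, from creation through TLS and teardown.
func exampleJigLifecycle(c *client.Client, ns string) {
	jig := newTestJig(c)
	jig.createIngress(filepath.Join(ingressManifestPath, "http"), ns, map[string]string{})
	jig.addHTTPS("foo", "foo.bar.com")
	jig.waitForIngress()
	jig.deleteIngress()
}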