diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c14c8360f4..9401ee0093f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,168 +1,276 @@ -- [v1.7.0-rc.1](#v170-rc1) - - [Downloads for v1.7.0-rc.1](#downloads-for-v170-rc1) +- [v1.7.1](#v171) + - [Downloads for v1.7.1](#downloads-for-v171) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - - [Changelog since v1.7.0-beta.2](#changelog-since-v170-beta2) - - [Action Required](#action-required) + - [Changelog since v1.7.0](#changelog-since-v170) - [Other notable changes](#other-notable-changes) -- [v1.8.0-alpha.1](#v180-alpha1) - - [Downloads for v1.8.0-alpha.1](#downloads-for-v180-alpha1) +- [v1.8.0-alpha.2](#v180-alpha2) + - [Downloads for v1.8.0-alpha.2](#downloads-for-v180-alpha2) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - - [Changelog since v1.7.0-alpha.4](#changelog-since-v170-alpha4) - - [Action Required](#action-required-1) + - [Changelog since v1.7.0](#changelog-since-v170-1) + - [Action Required](#action-required) - [Other notable changes](#other-notable-changes-1) -- [v1.6.6](#v166) - - [Downloads for v1.6.6](#downloads-for-v166) +- [v1.6.7](#v167) + - [Downloads for v1.6.7](#downloads-for-v167) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - - [Changelog since v1.6.5](#changelog-since-v165) - - [Action Required](#action-required-2) + - [Changelog since v1.6.6](#changelog-since-v166) - [Other notable changes](#other-notable-changes-2) -- [v1.7.0-beta.2](#v170-beta2) - - [Downloads for v1.7.0-beta.2](#downloads-for-v170-beta2) +- [v1.7.0](#v170) + - [Downloads for v1.7.0](#downloads-for-v170) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - - [Changelog since v1.7.0-beta.1](#changelog-since-v170-beta1) - - [Action 
Required](#action-required-3) - - [Other notable changes](#other-notable-changes-3) -- [v1.6.5](#v165) - - [Known Issues for v1.6.5](#known-issues-for-v165) - - [Downloads for v1.6.5](#downloads-for-v165) + - [**Major Themes**](#major-themes) + - [**Action Required Before Upgrading**](#action-required-before-upgrading) + - [Network](#network) + - [Storage](#storage) + - [API Machinery](#api-machinery) + - [Controller Manager](#controller-manager) + - [kubectl (CLI)](#kubectl-cli) + - [kubeadm](#kubeadm) + - [Cloud Providers](#cloud-providers) + - [**Known Issues**](#known-issues) + - [**Deprecations**](#deprecations) + - [Cluster provisioning scripts](#cluster-provisioning-scripts) + - [Client libraries](#client-libraries) + - [DaemonSet](#daemonset) + - [kube-proxy](#kube-proxy) + - [Namespace](#namespace) + - [Scheduling](#scheduling) + - [**Notable Features**](#notable-features) + - [Kubefed](#kubefed) + - [**Kubernetes API**](#kubernetes-api) + - [User Provided Extensions](#user-provided-extensions) + - [**Application Deployment**](#application-deployment) + - [StatefulSet](#statefulset) + - [DaemonSet](#daemonset-1) + - [Deployments](#deployments) + - [PodDisruptionBudget](#poddisruptionbudget) + - [**Security**](#security) + - [Admission Control](#admission-control) + - [TLS Bootstrapping](#tls-bootstrapping) + - [Audit Logging](#audit-logging) + - [Encryption at Rest](#encryption-at-rest) + - [Node Authorization](#node-authorization) + - [**Application Autoscaling**](#application-autoscaling) + - [Horizontal Pod Autoscaler](#horizontal-pod-autoscaler) + - [**Cluster Lifecycle**](#cluster-lifecycle) + - [kubeadm](#kubeadm-1) + - [Cloud Provider Support](#cloud-provider-support) + - [**Cluster Federation**](#cluster-federation) + - [Placement Policy](#placement-policy) + - [Cluster Selection](#cluster-selection) + - [**Instrumentation**](#instrumentation) + - [Core Metrics API](#core-metrics-api) + - [**Internationalization**](#internationalization) + - 
[**kubectl (CLI)**](#kubectl-cli-1) + - [**Networking**](#networking) + - [Network Policy](#network-policy) + - [Load Balancing](#load-balancing) + - [**Node Components**](#node-components) + - [Container Runtime Interface](#container-runtime-interface) + - [**Scheduling**](#scheduling-1) + - [Scheduler Extender](#scheduler-extender) + - [**Storage**](#storage-1) + - [Local Storage](#local-storage) + - [Volume Plugins](#volume-plugins) + - [Metrics](#metrics) + - [**Other notable changes**](#other-notable-changes-3) + - [Admission plugin](#admission-plugin) + - [API Machinery](#api-machinery-1) + - [Application autoscaling](#application-autoscaling-1) + - [Application Deployment](#application-deployment-1) + - [Cluster Autoscaling](#cluster-autoscaling) + - [Cloud Provider Enhancement](#cloud-provider-enhancement) + - [Cluster Provisioning](#cluster-provisioning) + - [Cluster federation](#cluster-federation-1) + - [Credential provider](#credential-provider) + - [Information for Kubernetes clients (openapi, swagger, client-go)](#information-for-kubernetes-clients-openapi-swagger-client-go) + - [Instrumentation](#instrumentation-1) + - [Internal storage layer](#internal-storage-layer) + - [Kubernetes Dashboard](#kubernetes-dashboard) + - [kube-dns](#kube-dns) + - [kube-proxy](#kube-proxy-1) + - [kube-scheduler](#kube-scheduler) + - [Storage](#storage-2) + - [Networking](#networking-1) + - [Node controller](#node-controller) + - [Node Components](#node-components-1) + - [Scheduling](#scheduling-2) + - [Security](#security-1) + - [Scalability](#scalability) + - [**External Dependency Version Information**](#external-dependency-version-information) + - [Previous Releases Included in v1.7.0](#previous-releases-included-in-v170) +- [v1.7.0-rc.1](#v170-rc1) + - [Downloads for v1.7.0-rc.1](#downloads-for-v170-rc1) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - - [Changelog since 
v1.6.4](#changelog-since-v164) + - [Changelog since v1.7.0-beta.2](#changelog-since-v170-beta2) + - [Action Required](#action-required-1) - [Other notable changes](#other-notable-changes-4) -- [v1.7.0-beta.1](#v170-beta1) - - [Downloads for v1.7.0-beta.1](#downloads-for-v170-beta1) +- [v1.8.0-alpha.1](#v180-alpha1) + - [Downloads for v1.8.0-alpha.1](#downloads-for-v180-alpha1) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) - - [Changelog since v1.7.0-alpha.4](#changelog-since-v170-alpha4-1) - - [Action Required](#action-required-4) + - [Changelog since v1.7.0-alpha.4](#changelog-since-v170-alpha4) + - [Action Required](#action-required-2) - [Other notable changes](#other-notable-changes-5) -- [v1.6.4](#v164) - - [Known Issues for v1.6.4](#known-issues-for-v164) - - [Downloads for v1.6.4](#downloads-for-v164) +- [v1.6.6](#v166) + - [Downloads for v1.6.6](#downloads-for-v166) - [Client Binaries](#client-binaries-6) - [Server Binaries](#server-binaries-6) - [Node Binaries](#node-binaries-6) - - [Changelog since v1.6.3](#changelog-since-v163) + - [Changelog since v1.6.5](#changelog-since-v165) + - [Action Required](#action-required-3) - [Other notable changes](#other-notable-changes-6) -- [v1.7.0-alpha.4](#v170-alpha4) - - [Downloads for v1.7.0-alpha.4](#downloads-for-v170-alpha4) +- [v1.7.0-beta.2](#v170-beta2) + - [Downloads for v1.7.0-beta.2](#downloads-for-v170-beta2) - [Client Binaries](#client-binaries-7) - [Server Binaries](#server-binaries-7) - [Node Binaries](#node-binaries-7) - - [Changelog since v1.7.0-alpha.3](#changelog-since-v170-alpha3) - - [Action Required](#action-required-5) + - [Changelog since v1.7.0-beta.1](#changelog-since-v170-beta1) + - [Action Required](#action-required-4) - [Other notable changes](#other-notable-changes-7) -- [v1.6.3](#v163) - - [Known Issues for v1.6.3](#known-issues-for-v163) - - [Downloads for v1.6.3](#downloads-for-v163) +- [v1.6.5](#v165) + - [Known 
Issues for v1.6.5](#known-issues-for-v165) + - [Downloads for v1.6.5](#downloads-for-v165) - [Client Binaries](#client-binaries-8) - [Server Binaries](#server-binaries-8) - [Node Binaries](#node-binaries-8) - - [Changelog since v1.6.2](#changelog-since-v162) + - [Changelog since v1.6.4](#changelog-since-v164) - [Other notable changes](#other-notable-changes-8) -- [v1.7.0-alpha.3](#v170-alpha3) - - [Downloads for v1.7.0-alpha.3](#downloads-for-v170-alpha3) +- [v1.7.0-beta.1](#v170-beta1) + - [Downloads for v1.7.0-beta.1](#downloads-for-v170-beta1) - [Client Binaries](#client-binaries-9) - [Server Binaries](#server-binaries-9) - [Node Binaries](#node-binaries-9) - - [Changelog since v1.7.0-alpha.2](#changelog-since-v170-alpha2) - - [Action Required](#action-required-6) + - [Changelog since v1.7.0-alpha.4](#changelog-since-v170-alpha4-1) + - [Action Required](#action-required-5) - [Other notable changes](#other-notable-changes-9) -- [v1.5.7](#v157) - - [Downloads for v1.5.7](#downloads-for-v157) +- [v1.6.4](#v164) + - [Known Issues for v1.6.4](#known-issues-for-v164) + - [Downloads for v1.6.4](#downloads-for-v164) - [Client Binaries](#client-binaries-10) - [Server Binaries](#server-binaries-10) - [Node Binaries](#node-binaries-10) - - [Changelog since v1.5.6](#changelog-since-v156) + - [Changelog since v1.6.3](#changelog-since-v163) - [Other notable changes](#other-notable-changes-10) -- [v1.4.12](#v1412) - - [Downloads for v1.4.12](#downloads-for-v1412) +- [v1.7.0-alpha.4](#v170-alpha4) + - [Downloads for v1.7.0-alpha.4](#downloads-for-v170-alpha4) - [Client Binaries](#client-binaries-11) - [Server Binaries](#server-binaries-11) - [Node Binaries](#node-binaries-11) - - [Changelog since v1.4.9](#changelog-since-v149) + - [Changelog since v1.7.0-alpha.3](#changelog-since-v170-alpha3) + - [Action Required](#action-required-6) - [Other notable changes](#other-notable-changes-11) -- [v1.7.0-alpha.2](#v170-alpha2) - - [Downloads for 
v1.7.0-alpha.2](#downloads-for-v170-alpha2) +- [v1.6.3](#v163) + - [Known Issues for v1.6.3](#known-issues-for-v163) + - [Downloads for v1.6.3](#downloads-for-v163) - [Client Binaries](#client-binaries-12) - [Server Binaries](#server-binaries-12) - - [Changelog since v1.7.0-alpha.1](#changelog-since-v170-alpha1) - - [Action Required](#action-required-7) + - [Node Binaries](#node-binaries-12) + - [Changelog since v1.6.2](#changelog-since-v162) - [Other notable changes](#other-notable-changes-12) -- [v1.6.2](#v162) - - [Downloads for v1.6.2](#downloads-for-v162) +- [v1.7.0-alpha.3](#v170-alpha3) + - [Downloads for v1.7.0-alpha.3](#downloads-for-v170-alpha3) - [Client Binaries](#client-binaries-13) - [Server Binaries](#server-binaries-13) - - [Changelog since v1.6.1](#changelog-since-v161) + - [Node Binaries](#node-binaries-13) + - [Changelog since v1.7.0-alpha.2](#changelog-since-v170-alpha2) + - [Action Required](#action-required-7) - [Other notable changes](#other-notable-changes-13) -- [v1.7.0-alpha.1](#v170-alpha1) - - [Downloads for v1.7.0-alpha.1](#downloads-for-v170-alpha1) +- [v1.5.7](#v157) + - [Downloads for v1.5.7](#downloads-for-v157) - [Client Binaries](#client-binaries-14) - [Server Binaries](#server-binaries-14) - - [Changelog since v1.6.0](#changelog-since-v160) + - [Node Binaries](#node-binaries-14) + - [Changelog since v1.5.6](#changelog-since-v156) - [Other notable changes](#other-notable-changes-14) -- [v1.6.1](#v161) - - [Downloads for v1.6.1](#downloads-for-v161) +- [v1.4.12](#v1412) + - [Downloads for v1.4.12](#downloads-for-v1412) - [Client Binaries](#client-binaries-15) - [Server Binaries](#server-binaries-15) - - [Changelog since v1.6.0](#changelog-since-v160-1) + - [Node Binaries](#node-binaries-15) + - [Changelog since v1.4.9](#changelog-since-v149) - [Other notable changes](#other-notable-changes-15) -- [v1.6.0](#v160) - - [Downloads for v1.6.0](#downloads-for-v160) +- [v1.7.0-alpha.2](#v170-alpha2) + - [Downloads for 
v1.7.0-alpha.2](#downloads-for-v170-alpha2) - [Client Binaries](#client-binaries-16) - [Server Binaries](#server-binaries-16) + - [Changelog since v1.7.0-alpha.1](#changelog-since-v170-alpha1) + - [Action Required](#action-required-8) + - [Other notable changes](#other-notable-changes-16) +- [v1.6.2](#v162) + - [Downloads for v1.6.2](#downloads-for-v162) + - [Client Binaries](#client-binaries-17) + - [Server Binaries](#server-binaries-17) + - [Changelog since v1.6.1](#changelog-since-v161) + - [Other notable changes](#other-notable-changes-17) +- [v1.7.0-alpha.1](#v170-alpha1) + - [Downloads for v1.7.0-alpha.1](#downloads-for-v170-alpha1) + - [Client Binaries](#client-binaries-18) + - [Server Binaries](#server-binaries-18) + - [Changelog since v1.6.0](#changelog-since-v160) + - [Other notable changes](#other-notable-changes-18) +- [v1.6.1](#v161) + - [Downloads for v1.6.1](#downloads-for-v161) + - [Client Binaries](#client-binaries-19) + - [Server Binaries](#server-binaries-19) + - [Changelog since v1.6.0](#changelog-since-v160-1) + - [Other notable changes](#other-notable-changes-19) +- [v1.6.0](#v160) + - [Downloads for v1.6.0](#downloads-for-v160) + - [Client Binaries](#client-binaries-20) + - [Server Binaries](#server-binaries-20) - [WARNING: etcd backup strongly recommended](#warning:-etcd-backup-strongly-recommended) - [Major updates and release themes](#major-updates-and-release-themes) - - [Action Required](#action-required-8) + - [Action Required](#action-required-9) - [Certificates API](#certificates-api) - [Cluster Autoscaler](#cluster-autoscaler) - [Deployment](#deployment) - [Federation](#federation) - - [Internal Storage Layer](#internal-storage-layer) - - [Node Components](#node-components) + - [Internal Storage Layer](#internal-storage-layer-1) + - [Node Components](#node-components-2) - [kubectl](#kubectl) - [RBAC](#rbac) - - [Scheduling](#scheduling) + - [Scheduling](#scheduling-3) - [Service](#service) - - [StatefulSet](#statefulset) + - 
[StatefulSet](#statefulset-1) - [Volumes](#volumes) - - [Notable Features](#notable-features) + - [Notable Features](#notable-features-1) - [Autoscaling](#autoscaling) - - [DaemonSet](#daemonset) + - [DaemonSet](#daemonset-2) - [Deployment](#deployment-1) - [Federation](#federation-1) - - [Internal Storage Layer](#internal-storage-layer-1) - - [kubeadm](#kubeadm) - - [Node Components](#node-components-1) + - [Internal Storage Layer](#internal-storage-layer-2) + - [kubeadm](#kubeadm-2) + - [Node Components](#node-components-3) - [RBAC](#rbac-1) - - [Scheduling](#scheduling-1) + - [Scheduling](#scheduling-4) - [Service Catalog](#service-catalog) - [Volumes](#volumes-1) - - [Deprecations](#deprecations) - - [Cluster Provisioning Scripts](#cluster-provisioning-scripts) - - [kubeadm](#kubeadm-1) + - [Deprecations](#deprecations-1) + - [Cluster Provisioning Scripts](#cluster-provisioning-scripts-1) + - [kubeadm](#kubeadm-3) - [Other Deprecations](#other-deprecations) - [Changes to API Resources](#changes-to-api-resources) - [ABAC](#abac) - - [Admission Control](#admission-control) + - [Admission Control](#admission-control-1) - [Authentication](#authentication) - [Authorization](#authorization) - [Autoscaling](#autoscaling-1) - [Certificates](#certificates) - [ConfigMap](#configmap) - [CronJob](#cronjob) - - [DaemonSet](#daemonset-1) + - [DaemonSet](#daemonset-3) - [Deployment](#deployment-2) - [Node](#node) - [Pod](#pod) @@ -171,7 +279,7 @@ - [ReplicaSet](#replicaset) - [Secrets](#secrets) - [Service](#service-1) - - [StatefulSet](#statefulset-1) + - [StatefulSet](#statefulset-2) - [Taints and Tolerations](#taints-and-tolerations) - [Volumes](#volumes-2) - [Changes to Major Components](#changes-to-major-components) @@ -187,10 +295,10 @@ - [GKE](#gke) - [vSphere](#vsphere) - [Federation](#federation-2) - - [kubefed](#kubefed) - - [Other Notable Changes](#other-notable-changes-16) + - [kubefed](#kubefed-1) + - [Other Notable Changes](#other-notable-changes-20) - [Garbage 
Collector](#garbage-collector) - - [kubeadm](#kubeadm-2) + - [kubeadm](#kubeadm-4) - [kubectl](#kubectl-1) - [New Commands](#new-commands) - [Create subcommands](#create-subcommands) @@ -198,20 +306,20 @@ - [Updates to apply](#updates-to-apply) - [Updates to edit](#updates-to-edit) - [Bug fixes](#bug-fixes) - - [Other Notable Changes](#other-notable-changes-17) - - [Node Components](#node-components-2) + - [Other Notable Changes](#other-notable-changes-21) + - [Node Components](#node-components-4) - [Bug fixes](#bug-fixes-1) - [kube-controller-manager](#kube-controller-manager) - - [kube-dns](#kube-dns) - - [kube-proxy](#kube-proxy) + - [kube-dns](#kube-dns-1) + - [kube-proxy](#kube-proxy-2) - [Scheduler](#scheduler) - - [Volume Plugins](#volume-plugins) + - [Volume Plugins](#volume-plugins-1) - [Azure Disk](#azure-disk) - [GlusterFS](#glusterfs) - [Photon](#photon) - [rbd](#rbd) - [vSphere](#vsphere-1) - - [Other Notable Changes](#other-notable-changes-18) + - [Other Notable Changes](#other-notable-changes-22) - [Changes to Cluster Provisioning Scripts](#changes-to-cluster-provisioning-scripts) - [AWS](#aws-1) - [Juju](#juju) @@ -219,7 +327,7 @@ - [GCE](#gce-1) - [OpenStack](#openstack) - [Container Images](#container-images) - - [Other Notable Changes](#other-notable-changes-19) + - [Other Notable Changes](#other-notable-changes-23) - [Changes to Addons](#changes-to-addons) - [Dashboard](#dashboard) - [DNS](#dns) @@ -230,229 +338,229 @@ - [Fluentd](#fluentd) - [Heapster](#heapster) - [Registry](#registry) - - [External Dependency Version Information](#external-dependency-version-information) + - [External Dependency Version Information](#external-dependency-version-information-1) - [Changelog since v1.6.0-rc.1](#changelog-since-v160-rc1) - [Previous Releases Included in v1.6.0](#previous-releases-included-in-v160) - [v1.5.6](#v156) - [Downloads for v1.5.6](#downloads-for-v156) - - [Client Binaries](#client-binaries-17) - - [Server Binaries](#server-binaries-17) + 
- [Client Binaries](#client-binaries-21) + - [Server Binaries](#server-binaries-21) - [Changelog since v1.5.5](#changelog-since-v155) - - [Other notable changes](#other-notable-changes-20) + - [Other notable changes](#other-notable-changes-24) - [v1.6.0-rc.1](#v160-rc1) - [Downloads for v1.6.0-rc.1](#downloads-for-v160-rc1) - - [Client Binaries](#client-binaries-18) - - [Server Binaries](#server-binaries-18) + - [Client Binaries](#client-binaries-22) + - [Server Binaries](#server-binaries-22) - [Changelog since v1.6.0-beta.4](#changelog-since-v160-beta4) - - [Other notable changes](#other-notable-changes-21) + - [Other notable changes](#other-notable-changes-25) - [v1.5.5](#v155) - [Downloads for v1.5.5](#downloads-for-v155) - - [Client Binaries](#client-binaries-19) - - [Server Binaries](#server-binaries-19) + - [Client Binaries](#client-binaries-23) + - [Server Binaries](#server-binaries-23) - [Changelog since v1.5.4](#changelog-since-v154) - [v1.6.0-beta.4](#v160-beta4) - [Downloads for v1.6.0-beta.4](#downloads-for-v160-beta4) - - [Client Binaries](#client-binaries-20) - - [Server Binaries](#server-binaries-20) - - [Changelog since v1.6.0-beta.3](#changelog-since-v160-beta3) - - [Other notable changes](#other-notable-changes-22) -- [v1.6.0-beta.3](#v160-beta3) - - [Downloads for v1.6.0-beta.3](#downloads-for-v160-beta3) - - [Client Binaries](#client-binaries-21) - - [Server Binaries](#server-binaries-21) - - [Changelog since v1.6.0-beta.2](#changelog-since-v160-beta2) - - [Other notable changes](#other-notable-changes-23) -- [v1.6.0-beta.2](#v160-beta2) - - [Downloads for v1.6.0-beta.2](#downloads-for-v160-beta2) - - [Client Binaries](#client-binaries-22) - - [Server Binaries](#server-binaries-22) - - [Changelog since v1.6.0-beta.1](#changelog-since-v160-beta1) - - [Action Required](#action-required-9) - - [Other notable changes](#other-notable-changes-24) -- [v1.5.4](#v154) - - [Downloads for v1.5.4](#downloads-for-v154) - - [Client 
Binaries](#client-binaries-23) - - [Server Binaries](#server-binaries-23) - - [Changelog since v1.5.3](#changelog-since-v153) - - [Other notable changes](#other-notable-changes-25) -- [v1.6.0-beta.1](#v160-beta1) - - [Downloads for v1.6.0-beta.1](#downloads-for-v160-beta1) - [Client Binaries](#client-binaries-24) - [Server Binaries](#server-binaries-24) - - [Changelog since v1.6.0-alpha.3](#changelog-since-v160-alpha3) - - [Action Required](#action-required-10) + - [Changelog since v1.6.0-beta.3](#changelog-since-v160-beta3) - [Other notable changes](#other-notable-changes-26) -- [v1.6.0-alpha.3](#v160-alpha3) - - [Downloads for v1.6.0-alpha.3](#downloads-for-v160-alpha3) +- [v1.6.0-beta.3](#v160-beta3) + - [Downloads for v1.6.0-beta.3](#downloads-for-v160-beta3) - [Client Binaries](#client-binaries-25) - [Server Binaries](#server-binaries-25) - - [Changelog since v1.6.0-alpha.2](#changelog-since-v160-alpha2) + - [Changelog since v1.6.0-beta.2](#changelog-since-v160-beta2) - [Other notable changes](#other-notable-changes-27) -- [v1.4.9](#v149) - - [Downloads for v1.4.9](#downloads-for-v149) +- [v1.6.0-beta.2](#v160-beta2) + - [Downloads for v1.6.0-beta.2](#downloads-for-v160-beta2) - [Client Binaries](#client-binaries-26) - [Server Binaries](#server-binaries-26) - - [Changelog since v1.4.8](#changelog-since-v148) + - [Changelog since v1.6.0-beta.1](#changelog-since-v160-beta1) + - [Action Required](#action-required-10) - [Other notable changes](#other-notable-changes-28) -- [v1.5.3](#v153) - - [Downloads for v1.5.3](#downloads-for-v153) +- [v1.5.4](#v154) + - [Downloads for v1.5.4](#downloads-for-v154) - [Client Binaries](#client-binaries-27) - [Server Binaries](#server-binaries-27) - - [Node Binaries](#node-binaries-12) - - [Changelog since v1.5.2](#changelog-since-v152) + - [Changelog since v1.5.3](#changelog-since-v153) - [Other notable changes](#other-notable-changes-29) -- [v1.6.0-alpha.2](#v160-alpha2) - - [Downloads for 
v1.6.0-alpha.2](#downloads-for-v160-alpha2) +- [v1.6.0-beta.1](#v160-beta1) + - [Downloads for v1.6.0-beta.1](#downloads-for-v160-beta1) - [Client Binaries](#client-binaries-28) - [Server Binaries](#server-binaries-28) - - [Changelog since v1.6.0-alpha.1](#changelog-since-v160-alpha1) + - [Changelog since v1.6.0-alpha.3](#changelog-since-v160-alpha3) + - [Action Required](#action-required-11) - [Other notable changes](#other-notable-changes-30) -- [v1.6.0-alpha.1](#v160-alpha1) - - [Downloads for v1.6.0-alpha.1](#downloads-for-v160-alpha1) +- [v1.6.0-alpha.3](#v160-alpha3) + - [Downloads for v1.6.0-alpha.3](#downloads-for-v160-alpha3) - [Client Binaries](#client-binaries-29) - [Server Binaries](#server-binaries-29) - - [Changelog since v1.5.0](#changelog-since-v150) - - [Action Required](#action-required-11) + - [Changelog since v1.6.0-alpha.2](#changelog-since-v160-alpha2) - [Other notable changes](#other-notable-changes-31) -- [v1.5.2](#v152) - - [Downloads for v1.5.2](#downloads-for-v152) +- [v1.4.9](#v149) + - [Downloads for v1.4.9](#downloads-for-v149) - [Client Binaries](#client-binaries-30) - [Server Binaries](#server-binaries-30) - - [Changelog since v1.5.1](#changelog-since-v151) + - [Changelog since v1.4.8](#changelog-since-v148) - [Other notable changes](#other-notable-changes-32) -- [v1.4.8](#v148) - - [Downloads for v1.4.8](#downloads-for-v148) +- [v1.5.3](#v153) + - [Downloads for v1.5.3](#downloads-for-v153) - [Client Binaries](#client-binaries-31) - [Server Binaries](#server-binaries-31) - - [Changelog since v1.4.7](#changelog-since-v147) + - [Node Binaries](#node-binaries-16) + - [Changelog since v1.5.2](#changelog-since-v152) - [Other notable changes](#other-notable-changes-33) -- [v1.5.1](#v151) - - [Downloads for v1.5.1](#downloads-for-v151) +- [v1.6.0-alpha.2](#v160-alpha2) + - [Downloads for v1.6.0-alpha.2](#downloads-for-v160-alpha2) - [Client Binaries](#client-binaries-32) - [Server Binaries](#server-binaries-32) - - [Changelog since 
v1.5.0](#changelog-since-v150-1) + - [Changelog since v1.6.0-alpha.1](#changelog-since-v160-alpha1) - [Other notable changes](#other-notable-changes-34) +- [v1.6.0-alpha.1](#v160-alpha1) + - [Downloads for v1.6.0-alpha.1](#downloads-for-v160-alpha1) + - [Client Binaries](#client-binaries-33) + - [Server Binaries](#server-binaries-33) + - [Changelog since v1.5.0](#changelog-since-v150) + - [Action Required](#action-required-12) + - [Other notable changes](#other-notable-changes-35) +- [v1.5.2](#v152) + - [Downloads for v1.5.2](#downloads-for-v152) + - [Client Binaries](#client-binaries-34) + - [Server Binaries](#server-binaries-34) + - [Changelog since v1.5.1](#changelog-since-v151) + - [Other notable changes](#other-notable-changes-36) +- [v1.4.8](#v148) + - [Downloads for v1.4.8](#downloads-for-v148) + - [Client Binaries](#client-binaries-35) + - [Server Binaries](#server-binaries-35) + - [Changelog since v1.4.7](#changelog-since-v147) + - [Other notable changes](#other-notable-changes-37) +- [v1.5.1](#v151) + - [Downloads for v1.5.1](#downloads-for-v151) + - [Client Binaries](#client-binaries-36) + - [Server Binaries](#server-binaries-36) + - [Changelog since v1.5.0](#changelog-since-v150-1) + - [Other notable changes](#other-notable-changes-38) - [Known Issues for v1.5.1](#known-issues-for-v151) - [v1.5.0](#v150) - [Downloads for v1.5.0](#downloads-for-v150) - - [Client Binaries](#client-binaries-33) - - [Server Binaries](#server-binaries-33) - - [Major Themes](#major-themes) + - [Client Binaries](#client-binaries-37) + - [Server Binaries](#server-binaries-37) + - [Major Themes](#major-themes-1) - [Features](#features) - - [Known Issues](#known-issues) + - [Known Issues](#known-issues-1) - [Notable Changes to Existing Behavior](#notable-changes-to-existing-behavior) - - [Deprecations](#deprecations-1) - - [Action Required Before Upgrading](#action-required-before-upgrading) - - [External Dependency Version Information](#external-dependency-version-information-1) 
+ - [Deprecations](#deprecations-2) + - [Action Required Before Upgrading](#action-required-before-upgrading-1) + - [External Dependency Version Information](#external-dependency-version-information-2) - [Changelog since v1.5.0-beta.3](#changelog-since-v150-beta3) - - [Other notable changes](#other-notable-changes-35) + - [Other notable changes](#other-notable-changes-39) - [Previous Releases Included in v1.5.0](#previous-releases-included-in-v150) - [v1.4.7](#v147) - [Downloads for v1.4.7](#downloads-for-v147) - - [Client Binaries](#client-binaries-34) - - [Server Binaries](#server-binaries-34) - - [Changelog since v1.4.6](#changelog-since-v146) - - [Other notable changes](#other-notable-changes-36) -- [v1.5.0-beta.3](#v150-beta3) - - [Downloads for v1.5.0-beta.3](#downloads-for-v150-beta3) - - [Client Binaries](#client-binaries-35) - - [Server Binaries](#server-binaries-35) - - [Changelog since v1.5.0-beta.2](#changelog-since-v150-beta2) - - [Other notable changes](#other-notable-changes-37) -- [v1.5.0-beta.2](#v150-beta2) - - [Downloads for v1.5.0-beta.2](#downloads-for-v150-beta2) - - [Client Binaries](#client-binaries-36) - - [Server Binaries](#server-binaries-36) - - [Changelog since v1.5.0-beta.1](#changelog-since-v150-beta1) - - [Other notable changes](#other-notable-changes-38) -- [v1.5.0-beta.1](#v150-beta1) - - [Downloads for v1.5.0-beta.1](#downloads-for-v150-beta1) - - [Client Binaries](#client-binaries-37) - - [Server Binaries](#server-binaries-37) - - [Changelog since v1.5.0-alpha.2](#changelog-since-v150-alpha2) - - [Action Required](#action-required-12) - - [Other notable changes](#other-notable-changes-39) -- [v1.4.6](#v146) - - [Downloads for v1.4.6](#downloads-for-v146) - [Client Binaries](#client-binaries-38) - [Server Binaries](#server-binaries-38) - - [Changelog since v1.4.5](#changelog-since-v145) + - [Changelog since v1.4.6](#changelog-since-v146) - [Other notable changes](#other-notable-changes-40) -- [v1.3.10](#v1310) - - [Downloads for 
v1.3.10](#downloads-for-v1310) +- [v1.5.0-beta.3](#v150-beta3) + - [Downloads for v1.5.0-beta.3](#downloads-for-v150-beta3) - [Client Binaries](#client-binaries-39) - [Server Binaries](#server-binaries-39) - - [Changelog since v1.3.9](#changelog-since-v139) + - [Changelog since v1.5.0-beta.2](#changelog-since-v150-beta2) - [Other notable changes](#other-notable-changes-41) -- [v1.4.5](#v145) - - [Downloads for v1.4.5](#downloads-for-v145) +- [v1.5.0-beta.2](#v150-beta2) + - [Downloads for v1.5.0-beta.2](#downloads-for-v150-beta2) - [Client Binaries](#client-binaries-40) - [Server Binaries](#server-binaries-40) - - [Changelog since v1.4.4](#changelog-since-v144) + - [Changelog since v1.5.0-beta.1](#changelog-since-v150-beta1) - [Other notable changes](#other-notable-changes-42) -- [v1.5.0-alpha.2](#v150-alpha2) - - [Downloads for v1.5.0-alpha.2](#downloads-for-v150-alpha2) +- [v1.5.0-beta.1](#v150-beta1) + - [Downloads for v1.5.0-beta.1](#downloads-for-v150-beta1) - [Client Binaries](#client-binaries-41) - [Server Binaries](#server-binaries-41) - - [Changelog since v1.5.0-alpha.1](#changelog-since-v150-alpha1) + - [Changelog since v1.5.0-alpha.2](#changelog-since-v150-alpha2) - [Action Required](#action-required-13) - [Other notable changes](#other-notable-changes-43) -- [v1.2.7](#v127) - - [Downloads for v1.2.7](#downloads-for-v127) +- [v1.4.6](#v146) + - [Downloads for v1.4.6](#downloads-for-v146) - [Client Binaries](#client-binaries-42) - [Server Binaries](#server-binaries-42) - - [Changelog since v1.2.6](#changelog-since-v126) + - [Changelog since v1.4.5](#changelog-since-v145) - [Other notable changes](#other-notable-changes-44) -- [v1.4.4](#v144) - - [Downloads for v1.4.4](#downloads-for-v144) +- [v1.3.10](#v1310) + - [Downloads for v1.3.10](#downloads-for-v1310) - [Client Binaries](#client-binaries-43) - [Server Binaries](#server-binaries-43) - - [Changelog since v1.4.3](#changelog-since-v143) + - [Changelog since v1.3.9](#changelog-since-v139) - [Other 
notable changes](#other-notable-changes-45) +- [v1.4.5](#v145) + - [Downloads for v1.4.5](#downloads-for-v145) + - [Client Binaries](#client-binaries-44) + - [Server Binaries](#server-binaries-44) + - [Changelog since v1.4.4](#changelog-since-v144) + - [Other notable changes](#other-notable-changes-46) +- [v1.5.0-alpha.2](#v150-alpha2) + - [Downloads for v1.5.0-alpha.2](#downloads-for-v150-alpha2) + - [Client Binaries](#client-binaries-45) + - [Server Binaries](#server-binaries-45) + - [Changelog since v1.5.0-alpha.1](#changelog-since-v150-alpha1) + - [Action Required](#action-required-14) + - [Other notable changes](#other-notable-changes-47) +- [v1.2.7](#v127) + - [Downloads for v1.2.7](#downloads-for-v127) + - [Client Binaries](#client-binaries-46) + - [Server Binaries](#server-binaries-46) + - [Changelog since v1.2.6](#changelog-since-v126) + - [Other notable changes](#other-notable-changes-48) +- [v1.4.4](#v144) + - [Downloads for v1.4.4](#downloads-for-v144) + - [Client Binaries](#client-binaries-47) + - [Server Binaries](#server-binaries-47) + - [Changelog since v1.4.3](#changelog-since-v143) + - [Other notable changes](#other-notable-changes-49) - [v1.3.9](#v139) - [Downloads](#downloads) - [Changelog since v1.3.8](#changelog-since-v138) - - [Other notable changes](#other-notable-changes-46) + - [Other notable changes](#other-notable-changes-50) - [v1.4.3](#v143) - [Downloads](#downloads-1) - [Changelog since v1.4.2-beta.1](#changelog-since-v142-beta1) - - [Other notable changes](#other-notable-changes-47) + - [Other notable changes](#other-notable-changes-51) - [v1.4.2](#v142) - [Downloads](#downloads-2) - [Changelog since v1.4.2-beta.1](#changelog-since-v142-beta1-1) - - [Other notable changes](#other-notable-changes-48) + - [Other notable changes](#other-notable-changes-52) - [v1.5.0-alpha.1](#v150-alpha1) - [Downloads](#downloads-3) - [Changelog since v1.4.0-alpha.3](#changelog-since-v140-alpha3) - [Experimental Features](#experimental-features) - - 
[Action Required](#action-required-14) - - [Other notable changes](#other-notable-changes-49) + - [Action Required](#action-required-15) + - [Other notable changes](#other-notable-changes-53) - [v1.4.2-beta.1](#v142-beta1) - [Downloads](#downloads-4) - [Changelog since v1.4.1](#changelog-since-v141) - - [Other notable changes](#other-notable-changes-50) + - [Other notable changes](#other-notable-changes-54) - [v1.4.1](#v141) - [Downloads](#downloads-5) - [Changelog since v1.4.1-beta.2](#changelog-since-v141-beta2) - [v1.4.1-beta.2](#v141-beta2) - [Downloads](#downloads-6) - [Changelog since v1.4.0](#changelog-since-v140) - - [Other notable changes](#other-notable-changes-51) + - [Other notable changes](#other-notable-changes-55) - [v1.3.8](#v138) - [Downloads](#downloads-7) - [Changelog since v1.3.7](#changelog-since-v137) - - [Other notable changes](#other-notable-changes-52) + - [Other notable changes](#other-notable-changes-56) - [v1.4.0](#v140) - [Downloads](#downloads-8) - - [Major Themes](#major-themes-1) + - [Major Themes](#major-themes-2) - [Features](#features-1) - - [Known Issues](#known-issues-1) + - [Known Issues](#known-issues-2) - [Notable Changes to Existing Behavior](#notable-changes-to-existing-behavior-1) - - [Deployments](#deployments) + - [Deployments](#deployments-1) - [kubectl rolling-update: < v1.4.0 client vs >=v1.4.0 cluster](#kubectl-rolling-update:-<-v140-client-vs->=v140-cluster) - [kubectl delete: < v1.4.0 client vs >=v1.4.0 cluster](#kubectl-delete:-<-v140-client-vs->=v140-cluster) - [DELETE operation in REST API](#delete-operation-in-rest-api) - - [Action Required Before Upgrading](#action-required-before-upgrading-1) + - [Action Required Before Upgrading](#action-required-before-upgrading-2) - [optionally, remove the old secret](#optionally-remove-the-old-secret) - [Previous Releases Included in v1.4.0](#previous-releases-included-in-v140) - [v1.4.0-beta.11](#v140-beta11) @@ -461,26 +569,26 @@ - [v1.4.0-beta.10](#v140-beta10) - 
[Downloads](#downloads-10) - [Changelog since v1.4.0-beta.8](#changelog-since-v140-beta8) - - [Other notable changes](#other-notable-changes-53) + - [Other notable changes](#other-notable-changes-57) - [v1.4.0-beta.8](#v140-beta8) - [Downloads](#downloads-11) - [Changelog since v1.4.0-beta.7](#changelog-since-v140-beta7) - [v1.4.0-beta.7](#v140-beta7) - [Downloads](#downloads-12) - [Changelog since v1.4.0-beta.6](#changelog-since-v140-beta6) - - [Other notable changes](#other-notable-changes-54) + - [Other notable changes](#other-notable-changes-58) - [v1.4.0-beta.6](#v140-beta6) - [Downloads](#downloads-13) - [Changelog since v1.4.0-beta.5](#changelog-since-v140-beta5) - - [Other notable changes](#other-notable-changes-55) + - [Other notable changes](#other-notable-changes-59) - [v1.4.0-beta.5](#v140-beta5) - [Downloads](#downloads-14) - [Changelog since v1.4.0-beta.3](#changelog-since-v140-beta3) - - [Other notable changes](#other-notable-changes-56) + - [Other notable changes](#other-notable-changes-60) - [v1.3.7](#v137) - [Downloads](#downloads-15) - [Changelog since v1.3.6](#changelog-since-v136) - - [Other notable changes](#other-notable-changes-57) + - [Other notable changes](#other-notable-changes-61) - [v1.4.0-beta.3](#v140-beta3) - [Downloads](#downloads-16) - [Changelog since v1.4.0-beta.2](#changelog-since-v140-beta2) @@ -491,57 +599,57 @@ - [v1.4.0-beta.2](#v140-beta2) - [Downloads](#downloads-17) - [Changelog since v1.4.0-beta.1](#changelog-since-v140-beta1) - - [Other notable changes](#other-notable-changes-58) + - [Other notable changes](#other-notable-changes-62) - [v1.4.0-beta.1](#v140-beta1) - [Downloads](#downloads-18) - [Changelog since v1.4.0-alpha.3](#changelog-since-v140-alpha3-1) - - [Action Required](#action-required-15) - - [Other notable changes](#other-notable-changes-59) + - [Action Required](#action-required-16) + - [Other notable changes](#other-notable-changes-63) - [v1.3.6](#v136) - [Downloads](#downloads-19) - [Changelog since 
v1.3.5](#changelog-since-v135) - - [Other notable changes](#other-notable-changes-60) + - [Other notable changes](#other-notable-changes-64) - [v1.4.0-alpha.3](#v140-alpha3) - [Downloads](#downloads-20) - [Changelog since v1.4.0-alpha.2](#changelog-since-v140-alpha2) - - [Action Required](#action-required-16) - - [Other notable changes](#other-notable-changes-61) + - [Action Required](#action-required-17) + - [Other notable changes](#other-notable-changes-65) - [v1.3.5](#v135) - [Downloads](#downloads-21) - [Changelog since v1.3.4](#changelog-since-v134) - - [Other notable changes](#other-notable-changes-62) + - [Other notable changes](#other-notable-changes-66) - [v1.3.4](#v134) - [Downloads](#downloads-22) - [Changelog since v1.3.3](#changelog-since-v133) - - [Other notable changes](#other-notable-changes-63) + - [Other notable changes](#other-notable-changes-67) - [v1.4.0-alpha.2](#v140-alpha2) - [Downloads](#downloads-23) - [Changelog since v1.4.0-alpha.1](#changelog-since-v140-alpha1) - - [Action Required](#action-required-17) - - [Other notable changes](#other-notable-changes-64) + - [Action Required](#action-required-18) + - [Other notable changes](#other-notable-changes-68) - [v1.3.3](#v133) - [Downloads](#downloads-24) - [Changelog since v1.3.2](#changelog-since-v132) - - [Other notable changes](#other-notable-changes-65) - - [Known Issues](#known-issues-2) + - [Other notable changes](#other-notable-changes-69) + - [Known Issues](#known-issues-3) - [v1.3.2](#v132) - [Downloads](#downloads-25) - [Changelog since v1.3.1](#changelog-since-v131) - - [Other notable changes](#other-notable-changes-66) + - [Other notable changes](#other-notable-changes-70) - [v1.3.1](#v131) - [Downloads](#downloads-26) - [Changelog since v1.3.0](#changelog-since-v130) - - [Other notable changes](#other-notable-changes-67) + - [Other notable changes](#other-notable-changes-71) - [v1.2.6](#v126) - [Downloads](#downloads-27) - [Changelog since v1.2.5](#changelog-since-v125) - - 
[Other notable changes](#other-notable-changes-68) + - [Other notable changes](#other-notable-changes-72) - [v1.4.0-alpha.1](#v140-alpha1) - [Downloads](#downloads-28) - [Changelog since v1.3.0](#changelog-since-v130-1) - [Experimental Features](#experimental-features-1) - - [Action Required](#action-required-18) - - [Other notable changes](#other-notable-changes-69) + - [Action Required](#action-required-19) + - [Other notable changes](#other-notable-changes-73) - [v1.3.0](#v130) - [Downloads](#downloads-29) - [Highlights](#highlights) @@ -556,71 +664,71 @@ - [v1.3.0-beta.3](#v130-beta3) - [Downloads](#downloads-30) - [Changelog since v1.3.0-beta.2](#changelog-since-v130-beta2) - - [Action Required](#action-required-19) - - [Other notable changes](#other-notable-changes-70) + - [Action Required](#action-required-20) + - [Other notable changes](#other-notable-changes-74) - [v1.2.5](#v125) - [Downloads](#downloads-31) - [Changes since v1.2.4](#changes-since-v124) - - [Other notable changes](#other-notable-changes-71) + - [Other notable changes](#other-notable-changes-75) - [v1.3.0-beta.2](#v130-beta2) - [Downloads](#downloads-32) - [Changes since v1.3.0-beta.1](#changes-since-v130-beta1) - [Experimental Features](#experimental-features-2) - - [Other notable changes](#other-notable-changes-72) + - [Other notable changes](#other-notable-changes-76) - [v1.3.0-beta.1](#v130-beta1) - [Downloads](#downloads-33) - [Changes since v1.3.0-alpha.5](#changes-since-v130-alpha5) - - [Action Required](#action-required-20) - - [Other notable changes](#other-notable-changes-73) + - [Action Required](#action-required-21) + - [Other notable changes](#other-notable-changes-77) - [v1.3.0-alpha.5](#v130-alpha5) - [Downloads](#downloads-34) - [Changes since v1.3.0-alpha.4](#changes-since-v130-alpha4) - - [Action Required](#action-required-21) - - [Other notable changes](#other-notable-changes-74) + - [Action Required](#action-required-22) + - [Other notable 
changes](#other-notable-changes-78) - [v1.3.0-alpha.4](#v130-alpha4) - [Downloads](#downloads-35) - [Changes since v1.3.0-alpha.3](#changes-since-v130-alpha3) - - [Action Required](#action-required-22) - - [Other notable changes](#other-notable-changes-75) + - [Action Required](#action-required-23) + - [Other notable changes](#other-notable-changes-79) - [v1.2.4](#v124) - [Downloads](#downloads-36) - [Changes since v1.2.3](#changes-since-v123) - - [Other notable changes](#other-notable-changes-76) + - [Other notable changes](#other-notable-changes-80) - [v1.3.0-alpha.3](#v130-alpha3) - [Downloads](#downloads-37) - [Changes since v1.3.0-alpha.2](#changes-since-v130-alpha2) - - [Action Required](#action-required-23) - - [Other notable changes](#other-notable-changes-77) + - [Action Required](#action-required-24) + - [Other notable changes](#other-notable-changes-81) - [v1.2.3](#v123) - [Downloads](#downloads-38) - [Changes since v1.2.2](#changes-since-v122) - - [Action Required](#action-required-24) - - [Other notable changes](#other-notable-changes-78) + - [Action Required](#action-required-25) + - [Other notable changes](#other-notable-changes-82) - [v1.3.0-alpha.2](#v130-alpha2) - [Downloads](#downloads-39) - [Changes since v1.3.0-alpha.1](#changes-since-v130-alpha1) - - [Other notable changes](#other-notable-changes-79) + - [Other notable changes](#other-notable-changes-83) - [v1.2.2](#v122) - [Downloads](#downloads-40) - [Changes since v1.2.1](#changes-since-v121) - - [Other notable changes](#other-notable-changes-80) + - [Other notable changes](#other-notable-changes-84) - [v1.2.1](#v121) - [Downloads](#downloads-41) - [Changes since v1.2.0](#changes-since-v120) - - [Other notable changes](#other-notable-changes-81) + - [Other notable changes](#other-notable-changes-85) - [v1.3.0-alpha.1](#v130-alpha1) - [Downloads](#downloads-42) - [Changes since v1.2.0](#changes-since-v120-1) - - [Action Required](#action-required-25) - - [Other notable 
changes](#other-notable-changes-82) + - [Action Required](#action-required-26) + - [Other notable changes](#other-notable-changes-86) - [v1.2.0](#v120) - [Downloads](#downloads-43) - [Changes since v1.1.1](#changes-since-v111) - - [Major Themes](#major-themes-2) + - [Major Themes](#major-themes-3) - [Other notable improvements](#other-notable-improvements) - [Experimental Features](#experimental-features-3) - - [Action required](#action-required-26) - - [Known Issues](#known-issues-3) + - [Action required](#action-required-27) + - [Known Issues](#known-issues-4) - [Docker Known Issues](#docker-known-issues) - [1.9.1](#191) - [Provider-specific Notes](#provider-specific-notes-1) @@ -632,6 +740,1409 @@ +# v1.7.1 + +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.7/examples) + +## Downloads for v1.7.1 + + +filename | sha256 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes.tar.gz) | `76bddfd19a50f92136456af5bbc3a9d4239260c0c40dccfe704156286a93127c` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-src.tar.gz) | `159100f6506c4d59d640a3b0fc7691c4a5023b346d7c3911c5cbbedce2ad8184` + +### Client Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-client-darwin-386.tar.gz) | `340ceb858bff489fa7ae15c6b526c4316d9c7b6ca354f68ff187c8b5eff08f45` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-client-darwin-amd64.tar.gz) | `1f1db50d57750115abd6e6e060c914292af7a6e2933a48ccf28ebbe8942c7826` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-client-linux-386.tar.gz) | `5eac1c92aee40cd2ef14248639d39d7cee910f077dd006a868c510116852fbba` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-client-linux-amd64.tar.gz) | `6b807520a69b8432baaa89304e8d1ff286d07af20e2a3712b8b2e38d61dbb445` 
+[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-client-linux-arm64.tar.gz) | `a91e0ea4381f659f60380b5b9d6f8114e13337f90a32bcb4a72b8168caef2e00` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-client-linux-arm.tar.gz) | `6e0e2e557d4e3df18e967e6025a36205aae5b8979dcbb33df6d6e44d9224809a` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-client-linux-ppc64le.tar.gz) | `22264e96ceaa2d853120be7dcbdc70a9938915cd10eaf5a2c75f4fb2dd12a2eb` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-client-linux-s390x.tar.gz) | `9b5ac9a66df99a2a8abdc908ef3cd933010facf4c08e96597e041fc359a62aa9` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-client-windows-386.tar.gz) | `bd3f99ead21f6c6c34dba7ef5c2d2308ef6770bcb255f286d9d5edbf33f5ccff` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-client-windows-amd64.tar.gz) | `e2578ca743bf03b367c473c32657cbed4cf27a12545841058f8bb873fb70e872` + +### Server Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-server-linux-amd64.tar.gz) | `467201c89d473bdec82a67c9b24453a2037eef1a1ed552f0dc55310355d21ea3` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-server-linux-arm64.tar.gz) | `1c1c5cad62423655b1e79bc831de5765cbe683aeef4efe9a823d2597334e19c1` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-server-linux-arm.tar.gz) | `17eee900df8ac9bbdd047b2f7d7cb2684820f71cb700dcb305e986acbddf66eb` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-server-linux-ppc64le.tar.gz) | `b1ae5f6d728cfe61b38acbc081e66ddf77ecc38ebdfdb42bfdd53e51fcd3aa2b` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-server-linux-s390x.tar.gz) | `20a273b20b10233fc2632d8a65e0b123fc87166e1f50171e7ede76c59f3118cd` + +### Node Binaries + 
+filename | sha256 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-node-linux-amd64.tar.gz) | `da0e6d5d6532ef7dba6e5db59e5bc142a52a0314bbb2c70e1fa8e73fe07d0e31` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-node-linux-arm64.tar.gz) | `939b6f779257671a141ecb243bc01e9a5dfb1cd05808820044d915049c3f591a` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-node-linux-arm.tar.gz) | `512fddbbb7353d6dd02e51e79e05101ab857c09e4a4970404258c783ab094c95` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-node-linux-ppc64le.tar.gz) | `795150d92ef93aa53be2db245b9f88cc40fe0fd27045835a23c8eee830c419ba` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-node-linux-s390x.tar.gz) | `58c9b1ef8f8b30fd7061ac87e60b7be9eb79b5bd50c2eef1564838768e7b1d02` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.7.1/kubernetes-node-windows-amd64.tar.gz) | `eae772609aa50d6a1f4f7cf6df5df2f56cbd438b9034f9be622bc0cfe1d13072` + +## Changelog since v1.7.0 + +### Other notable changes + +* Added new flag to `kubeadm init`: --node-name, that lets you specify the name of the Node object that will be created ([#48594](https://github.com/kubernetes/kubernetes/pull/48594), [@GheRivero](https://github.com/GheRivero)) +* Added new flag to `kubeadm join`: --node-name, that lets you specify the name of the Node object that's gonna be created ([#48538](https://github.com/kubernetes/kubernetes/pull/48538), [@GheRivero](https://github.com/GheRivero)) +* Fixes issue where you could not mount NFS or glusterFS volumes using hostnames on GCI/GKE with COS images. ([#42376](https://github.com/kubernetes/kubernetes/pull/42376), [@jingxu97](https://github.com/jingxu97)) +* Reduce amount of noise in Stackdriver Logging, generated by the event-exporter component in the fluentd-gcp addon. 
([#48712](https://github.com/kubernetes/kubernetes/pull/48712), [@crassirostris](https://github.com/crassirostris)) +* Add generic NoSchedule toleration to fluentd in gcp config. ([#48182](https://github.com/kubernetes/kubernetes/pull/48182), [@gmarek](https://github.com/gmarek)) +* RBAC role and role-binding reconciliation now ensures namespaces exist when reconciling on startup. ([#48480](https://github.com/kubernetes/kubernetes/pull/48480), [@liggitt](https://github.com/liggitt)) +* Support NoSchedule taints correctly in DaemonSet controller. ([#48189](https://github.com/kubernetes/kubernetes/pull/48189), [@mikedanese](https://github.com/mikedanese)) +* kubeadm: Expose only the cluster-info ConfigMap in the kube-public ns ([#48050](https://github.com/kubernetes/kubernetes/pull/48050), [@luxas](https://github.com/luxas)) + + + +# v1.8.0-alpha.2 + +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) + +## Downloads for v1.8.0-alpha.2 + + +filename | sha256 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes.tar.gz) | `26d8079fa6b2d82682db809827d260bbab8e6d0f45e457260b8c5ce640432426` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-src.tar.gz) | `141e5c1bf66b69f3c22870b2ab6159abc3b38c12cc20f41c8193044e16df3205` + +### Client Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-client-darwin-386.tar.gz) | `6ca63da27ca0c1efa04d079d90eba7e6f01a6e9581317892538be6a97ee64d95` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-client-darwin-amd64.tar.gz) | `0bfbd97f7fb7ce5e1228134d8ca40168553d179bfa44cbd5e925a6543fb3bbf5` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-client-linux-386.tar.gz) | `29d395cc61c91c602e32412e51d4eae333942e6b9da235270768d11c040733c3` 
+[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-client-linux-amd64.tar.gz) | `b1172bbb1d80ba29612d4de08dc4942b40b0f7d580dbb8ed4423c221f78920fe` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-client-linux-arm64.tar.gz) | `994621c4a9d0644e3e8a4f12f563588036412bb72f0104b888f7a2605d3a8015` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-client-linux-arm.tar.gz) | `1e0dd9e4e9730a8cd54d8eb7036d5d7307bd930a91d0fcb105601b2d03fda15d` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-client-linux-ppc64le.tar.gz) | `bdcf58f419b42d83ce8adb350388c962b8934782294f9715b617cdbdf201cc36` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-client-linux-s390x.tar.gz) | `5c58217cffb34043fae951222bfd429165c68439f590c8fb8e33e54fe1cab0de` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-client-windows-386.tar.gz) | `f78ec125f734433c9fc75a9d35dc7bdfa6d145f1cc071ff2e3a5435beef3368f` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-client-windows-amd64.tar.gz) | `78dca9aadc140e2868b0a3d1a77b5058e22f24710f9c7956d755b473b575bb9d` + +### Server Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-server-linux-amd64.tar.gz) | `802bb71cf19147857a50e842a00d50641f78fec5c5791a524639f7af70f9e1d4` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-server-linux-arm64.tar.gz) | `b8f15c32320188981d5e75c474d4e826e45f59083eb66304151da112fb3052b1` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-server-linux-arm.tar.gz) | `8f800befc32d8402a581c47254db921d54caa31c50513c257b251435756918f1` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-server-linux-ppc64le.tar.gz) | 
`a406bd0aaa92633dbb43216312971164b0230ea01c77679d12b9ffc873956d0d` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-server-linux-s390x.tar.gz) | `8e038b4ccdfc89a08204927c8097a51bd9e786a97c2f9d73fca763ebee6c2373` + +### Node Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-node-linux-amd64.tar.gz) | `1a9725cfb55991680fc75cb862d8a74d76f453be9e9f8ad043d62d5911ab50b9` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-node-linux-arm64.tar.gz) | `44fbdd86048bea2cb3d2d6ec1b6cb2c4ae19cb32f6df28e15392cd7f028a4350` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-node-linux-arm.tar.gz) | `76d9d36aa182fb93aab7a01f22f7a008ad2906a6224b4c009074100676403337` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-node-linux-ppc64le.tar.gz) | `07327ce6fe78bbae3d34b185b54ea0204bf875df488f0293ee1271599189160d` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-node-linux-s390x.tar.gz) | `e84a8c638834c435f82560b86f1a14ec861a8fc967a7cd7055ab86526ce744d0` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.8.0-alpha.2/kubernetes-node-windows-amd64.tar.gz) | `f0f69dc70751e3be2d564aa272f7fe67e86e91c7de3034776b98faddef51a73d` + +## Changelog since v1.7.0 + +### Action Required + +* The deprecated ThirdPartyResource (TPR) API has been removed. To avoid losing your TPR data, you must [migrate to CustomResourceDefinition](https://kubernetes.io/docs/tasks/access-kubernetes-api/migrate-third-party-resource/) before upgrading. ([#48353](https://github.com/kubernetes/kubernetes/pull/48353), [@deads2k](https://github.com/deads2k)) + +### Other notable changes + +* Removed scheduler dependencies to testapi. 
([#48405](https://github.com/kubernetes/kubernetes/pull/48405), [@k82cn](https://github.com/k82cn)) +* kubeadm: Fix a bug where `kubeadm join` would wait 5 seconds without doing anything. Now `kubeadm join` executes the tasks immediately. ([#48737](https://github.com/kubernetes/kubernetes/pull/48737), [@mattmoyer](https://github.com/mattmoyer)) +* Reduce amount of noise in Stackdriver Logging, generated by the event-exporter component in the fluentd-gcp addon. ([#48712](https://github.com/kubernetes/kubernetes/pull/48712), [@crassirostris](https://github.com/crassirostris)) +* To allow the userspace proxy to work correctly on multi-interface hosts when using the non-default-route interface, you may now set the `bindAddress` configuration option to an IP address assigned to a network interface. The proxy will use that IP address for any required NAT operations instead of the IP address of the interface which has the default route. ([#48613](https://github.com/kubernetes/kubernetes/pull/48613), [@dcbw](https://github.com/dcbw)) +* Move Mesos Cloud Provider out of Kubernetes Repo ([#47232](https://github.com/kubernetes/kubernetes/pull/47232), [@gyliu513](https://github.com/gyliu513)) +* - kubeadm now can accept versions like "1.6.4" where previously it strictly required "v1.6.4" ([#48507](https://github.com/kubernetes/kubernetes/pull/48507), [@kad](https://github.com/kad)) +* kubeadm: Implementing the certificates phase fully ([#48196](https://github.com/kubernetes/kubernetes/pull/48196), [@fabriziopandini](https://github.com/fabriziopandini)) +* Added case on 'terminated-but-not-yet-deleted' for Admit. ([#48322](https://github.com/kubernetes/kubernetes/pull/48322), [@k82cn](https://github.com/k82cn)) +* `kubectl run --env` no longer supports CSV parsing. To provide multiple env vars, use the `--env` flag multiple times instead of having env vars separated by commas. E.g. `--env ONE=1 --env TWO=2` instead of `--env ONE=1,TWO=2`. 
([#47460](https://github.com/kubernetes/kubernetes/pull/47460), [@mengqiy](https://github.com/mengqiy)) +* Local storage teardown fix ([#48402](https://github.com/kubernetes/kubernetes/pull/48402), [@ianchakeres](https://github.com/ianchakeres)) +* support json output for log backend of advanced audit ([#48605](https://github.com/kubernetes/kubernetes/pull/48605), [@CaoShuFeng](https://github.com/CaoShuFeng)) +* Requests with the query parameter `?watch=` are treated by the API server as a request to watch, but authorization and metrics were not correctly identifying those as watch requests, instead grouping them as list calls. ([#48583](https://github.com/kubernetes/kubernetes/pull/48583), [@smarterclayton](https://github.com/smarterclayton)) +* As part of the NetworkPolicy "v1" changes, it is also now ([#47123](https://github.com/kubernetes/kubernetes/pull/47123), [@danwinship](https://github.com/danwinship)) + * possible to update the spec field of an existing + * NetworkPolicy. (Previously you had to delete and recreate a + * NetworkPolicy if you wanted to change it.) +* Fix udp service blackhole problem when number of backends changes from 0 to non-0 ([#48524](https://github.com/kubernetes/kubernetes/pull/48524), [@freehan](https://github.com/freehan)) +* kubeadm: Make self-hosting work by using DaemonSets and split it out to a phase that can be invoked via the CLI ([#47435](https://github.com/kubernetes/kubernetes/pull/47435), [@luxas](https://github.com/luxas)) +* Added new flag to `kubeadm join`: --node-name, that lets you specify the name of the Node object that's gonna be created ([#48538](https://github.com/kubernetes/kubernetes/pull/48538), [@GheRivero](https://github.com/GheRivero)) +* Fix Audit-ID header key ([#48492](https://github.com/kubernetes/kubernetes/pull/48492), [@CaoShuFeng](https://github.com/CaoShuFeng)) +* Checked container spec when killing container. 
([#48194](https://github.com/kubernetes/kubernetes/pull/48194), [@k82cn](https://github.com/k82cn)) +* Fix kubectl describe for pods with controllerRef ([#45467](https://github.com/kubernetes/kubernetes/pull/45467), [@ddysher](https://github.com/ddysher)) +* Skip errors when unregistering juju kubernetes-workers ([#48144](https://github.com/kubernetes/kubernetes/pull/48144), [@ktsakalozos](https://github.com/ktsakalozos)) +* Configures the Juju Charm code to run kube-proxy with conntrack-max-per-core set to 0 when in an lxc as a workaround for issues when mounting /sys/module/nf_conntrack/parameters/hashsize ([#48450](https://github.com/kubernetes/kubernetes/pull/48450), [@wwwtyro](https://github.com/wwwtyro)) +* Group and order imported packages. ([#48399](https://github.com/kubernetes/kubernetes/pull/48399), [@k82cn](https://github.com/k82cn)) +* RBAC role and role-binding reconciliation now ensures namespaces exist when reconciling on startup. ([#48480](https://github.com/kubernetes/kubernetes/pull/48480), [@liggitt](https://github.com/liggitt)) +* Fix charms leaving services running after remove-unit ([#48446](https://github.com/kubernetes/kubernetes/pull/48446), [@Cynerva](https://github.com/Cynerva)) +* Added helper funcs to schedulercache.Resource. ([#46926](https://github.com/kubernetes/kubernetes/pull/46926), [@k82cn](https://github.com/k82cn)) +* When performing a GET then PUT, the kube-apiserver must write the canonical representation of the object to etcd if the current value does not match. That allows external agents to migrate content in etcd from one API version to another, across different storage types, or across varying encryption levels. This fixes a bug introduced in 1.5 where we unintentionally stopped writing the newest data. 
([#48394](https://github.com/kubernetes/kubernetes/pull/48394), [@smarterclayton](https://github.com/smarterclayton)) +* Fixed kubernetes charms not restarting services after snap upgrades ([#48440](https://github.com/kubernetes/kubernetes/pull/48440), [@Cynerva](https://github.com/Cynerva)) +* Fix: namespace-create have kubectl in path ([#48439](https://github.com/kubernetes/kubernetes/pull/48439), [@ktsakalozos](https://github.com/ktsakalozos)) +* add validate for advanced audit policy ([#47784](https://github.com/kubernetes/kubernetes/pull/47784), [@CaoShuFeng](https://github.com/CaoShuFeng)) +* Support NoSchedule taints correctly in DaemonSet controller. ([#48189](https://github.com/kubernetes/kubernetes/pull/48189), [@mikedanese](https://github.com/mikedanese)) +* Adds configuration option for Swift object store container name to OpenStack Heat provider. ([#48281](https://github.com/kubernetes/kubernetes/pull/48281), [@hogepodge](https://github.com/hogepodge)) +* Allow the system:heapster ClusterRole read access to deployments ([#48357](https://github.com/kubernetes/kubernetes/pull/48357), [@faraazkhan](https://github.com/faraazkhan)) +* Ensure get_password is accessing a file that exists. ([#48351](https://github.com/kubernetes/kubernetes/pull/48351), [@ktsakalozos](https://github.com/ktsakalozos)) +* GZip openapi schema if accepted by client ([#48151](https://github.com/kubernetes/kubernetes/pull/48151), [@apelisse](https://github.com/apelisse)) +* Fixes issue where you could not mount NFS or glusterFS volumes using hostnames on GCI/GKE with COS images. ([#42376](https://github.com/kubernetes/kubernetes/pull/42376), [@jingxu97](https://github.com/jingxu97)) +* Previously a deleted service account token secret would be considered valid until it was reaped. Now it is invalid as soon as the deletionTimestamp is set. 
([#48343](https://github.com/kubernetes/kubernetes/pull/48343), [@deads2k](https://github.com/deads2k)) +* Securing the cluster created by Juju ([#47835](https://github.com/kubernetes/kubernetes/pull/47835), [@ktsakalozos](https://github.com/ktsakalozos)) +* addon-resizer flapping behavior was removed. ([#46850](https://github.com/kubernetes/kubernetes/pull/46850), [@x13n](https://github.com/x13n)) +* Change default `httpGet` probe `User-Agent` to `kube-probe/` if none specified, overriding the default Go `User-Agent`. ([#47729](https://github.com/kubernetes/kubernetes/pull/47729), [@paultyng](https://github.com/paultyng)) +* Registries backed by the generic Store's `Update` implementation support delete-on-update, which allows resources to be automatically deleted during an update provided: ([#48065](https://github.com/kubernetes/kubernetes/pull/48065), [@ironcladlou](https://github.com/ironcladlou)) + * Garbage collection is enabled for the Store + * The resource being updated has no finalizers + * The resource being updated has a non-nil DeletionGracePeriodSeconds equal to 0 + * With this fix, Custom Resource instances now also support delete-on-update behavior under the same circumstances. +* Fixes an edge case where "kubectl apply view-last-applied" would emit garbage if the data contained Go format codes. ([#45611](https://github.com/kubernetes/kubernetes/pull/45611), [@atombender](https://github.com/atombender)) +* Bumped Heapster to v1.4.0. ([#48205](https://github.com/kubernetes/kubernetes/pull/48205), [@piosz](https://github.com/piosz)) + * More details about the release https://github.com/kubernetes/heapster/releases/tag/v1.4.0 +* In GCE and in a "private master" setup, do not set the network-plugin provider to CNI by default if a network policy provider is given. ([#48004](https://github.com/kubernetes/kubernetes/pull/48004), [@dnardo](https://github.com/dnardo)) +* Add generic NoSchedule toleration to fluentd in gcp config. 
([#48182](https://github.com/kubernetes/kubernetes/pull/48182), [@gmarek](https://github.com/gmarek))
+* kubeadm: Expose only the cluster-info ConfigMap in the kube-public ns ([#48050](https://github.com/kubernetes/kubernetes/pull/48050), [@luxas](https://github.com/luxas))
+* Fixes kubelet race condition in container manager. ([#48123](https://github.com/kubernetes/kubernetes/pull/48123), [@msau42](https://github.com/msau42))
+* Bump GCE ContainerVM to container-vm-v20170627 ([#48159](https://github.com/kubernetes/kubernetes/pull/48159), [@zmerlynn](https://github.com/zmerlynn))
+* Add PriorityClassName and Priority fields to PodSpec. ([#45610](https://github.com/kubernetes/kubernetes/pull/45610), [@bsalamat](https://github.com/bsalamat))
+* Add a failsafe for etcd not returning a connection string ([#48054](https://github.com/kubernetes/kubernetes/pull/48054), [@ktsakalozos](https://github.com/ktsakalozos))
+* Fix fluentd-gcp configuration to facilitate JSON parsing ([#48139](https://github.com/kubernetes/kubernetes/pull/48139), [@crassirostris](https://github.com/crassirostris))
+* Setting env var ENABLE_BIG_CLUSTER_SUBNETS=true will allow kube-up.sh to start clusters bigger than 4095 Nodes on GCE. ([#47513](https://github.com/kubernetes/kubernetes/pull/47513), [@gmarek](https://github.com/gmarek))
+* When determining the default external host of the kube apiserver, any configured cloud provider is now consulted ([#47038](https://github.com/kubernetes/kubernetes/pull/47038), [@yastij](https://github.com/yastij))
+* Updated comments for functions. ([#47242](https://github.com/kubernetes/kubernetes/pull/47242), [@k82cn](https://github.com/k82cn))
+* Fix setting juju worker labels during deployment ([#47178](https://github.com/kubernetes/kubernetes/pull/47178), [@ktsakalozos](https://github.com/ktsakalozos))
+* `kubefed init` correctly checks for RBAC API enablement. 
([#48077](https://github.com/kubernetes/kubernetes/pull/48077), [@liggitt](https://github.com/liggitt))
+* The garbage collector now cascades deletion properly when deleting an object with propagationPolicy="background". This resolves issue [[#44046](https://github.com/kubernetes/kubernetes/pull/44046)](https://github.com/kubernetes/kubernetes/issues/44046), so that when a deployment is deleted with propagationPolicy="background", the garbage collector ensures dependent pods are deleted as well. ([#44058](https://github.com/kubernetes/kubernetes/pull/44058), [@caesarxuchao](https://github.com/caesarxuchao))
+* Fix restart action on juju kubernetes-master ([#47170](https://github.com/kubernetes/kubernetes/pull/47170), [@ktsakalozos](https://github.com/ktsakalozos))
+* e2e: bump kubelet's resource usage limit ([#47971](https://github.com/kubernetes/kubernetes/pull/47971), [@yujuhong](https://github.com/yujuhong))
+* Cluster Autoscaler 0.6 ([#48074](https://github.com/kubernetes/kubernetes/pull/48074), [@mwielgus](https://github.com/mwielgus))
+* Checked whether balanced Pods were created. ([#47488](https://github.com/kubernetes/kubernetes/pull/47488), [@k82cn](https://github.com/k82cn))
+* Update protobuf time serialization for a one second granularity ([#47975](https://github.com/kubernetes/kubernetes/pull/47975), [@deads2k](https://github.com/deads2k))
+* Bumped Heapster to v1.4.0-beta.0 ([#47961](https://github.com/kubernetes/kubernetes/pull/47961), [@piosz](https://github.com/piosz))
+* `kubectl api-versions` now always fetches information about enabled API groups and versions instead of using the local cache. ([#48016](https://github.com/kubernetes/kubernetes/pull/48016), [@liggitt](https://github.com/liggitt))
+* Removes alpha feature gate for affinity annotations. 
([#47869](https://github.com/kubernetes/kubernetes/pull/47869), [@timothysc](https://github.com/timothysc)) +* Websocket requests may now authenticate to the API server by passing a bearer token in a websocket subprotocol of the form `base64url.bearer.authorization.k8s.io.` ([#47740](https://github.com/kubernetes/kubernetes/pull/47740), [@liggitt](https://github.com/liggitt)) +* Update cadvisor to v0.26.1 ([#47940](https://github.com/kubernetes/kubernetes/pull/47940), [@Random-Liu](https://github.com/Random-Liu)) +* Bump up npd version to v0.4.1 ([#47892](https://github.com/kubernetes/kubernetes/pull/47892), [@ajitak](https://github.com/ajitak)) +* Allow StorageClass Ceph RBD to specify image format and image features. ([#45805](https://github.com/kubernetes/kubernetes/pull/45805), [@weiwei04](https://github.com/weiwei04)) +* Removed mesos related labels. ([#46824](https://github.com/kubernetes/kubernetes/pull/46824), [@k82cn](https://github.com/k82cn)) +* Add RBAC support to fluentd-elasticsearch cluster addon ([#46203](https://github.com/kubernetes/kubernetes/pull/46203), [@simt2](https://github.com/simt2)) +* Avoid redundant copying of tars during kube-up for gce if the same file already exists ([#46792](https://github.com/kubernetes/kubernetes/pull/46792), [@ianchakeres](https://github.com/ianchakeres)) +* container runtime version has been added to the output of `kubectl get nodes -o=wide` as `CONTAINER-RUNTIME` ([#46646](https://github.com/kubernetes/kubernetes/pull/46646), [@rickypai](https://github.com/rickypai)) +* cAdvisor binds only to the interface that kubelet is running on instead of all interfaces. ([#47195](https://github.com/kubernetes/kubernetes/pull/47195), [@dims](https://github.com/dims)) +* The schema of the API that are served by the kube-apiserver, together with a small amount of generated code, are moved to k8s.io/api (https://github.com/kubernetes/api). 
([#44784](https://github.com/kubernetes/kubernetes/pull/44784), [@caesarxuchao](https://github.com/caesarxuchao)) + + + +# v1.6.7 + +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.6/examples) + +## Downloads for v1.6.7 + + +filename | sha256 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes.tar.gz) | `6522086d9666543ed4e88a791626953acd1ea843eb024f16f4a4a2390dcbb2b2` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-src.tar.gz) | `b2a73f140966ba0080ce16e3b9a67d5fd9849b36942f3490e9f8daa0fe4511c4` + +### Client Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-client-darwin-386.tar.gz) | `ffa06a16a3091b2697ef14f8e28bb08000455bd9b719cf0f510f011b864cd1e0` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-client-darwin-amd64.tar.gz) | `32de3e38f7a60c9171a63f43a2c7f0b2d8f8ba55d51468d8dbf7847dbd943b45` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-client-linux-386.tar.gz) | `d9c27321007607cc5afb2ff5b3cac210471d55dd1c3a478c6703ab72d187211e` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-client-linux-amd64.tar.gz) | `54947ef84181e89f9dbacedd54717cbed5cc7f9c36cb37bc8afc9097648e2c91` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-client-linux-arm64.tar.gz) | `e96d300eb6526705b1c1bedaaf3f4746f3e5d6b49ccc7e60650eb9ee022fba0e` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-client-linux-arm.tar.gz) | `e4605dca3948264fba603dc8f95b202528eb8ad4ca99c7f3a61f77031e7ba756` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-client-linux-ppc64le.tar.gz) | `8b77793aea5abf1c17b73f7e11476b9d387f3dc89e5d8405ffadd1a395258483` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-client-linux-s390x.tar.gz) | 
`ff3ddec930a0ffdc83fe324d544d4657d57a64a3973fb9df4ddaa7a98228d7fb` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-client-windows-386.tar.gz) | `ce09e4b071bb06039ad9bdf6a1059d59cf129dce942600fcdc9d320ff0c07a7a` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-client-windows-amd64.tar.gz) | `e985644f582945274e82764742f02bd175f05128c1945e987d06973dd5f5a56d` + +### Server Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-server-linux-amd64.tar.gz) | `1287bb85f1057eae53f8bb4e4475c990783e43d2f57ea1c551fdf2da7ca5345d` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-server-linux-arm64.tar.gz) | `51623850475669be59f6428922ba316d4dd60d977f892adfaf0ca0845c38506c` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-server-linux-arm.tar.gz) | `a5331022d29f085e6b7fc4ae064af64024eba6a02ae54e78c2e84b40d0aec598` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-server-linux-ppc64le.tar.gz) | `93d52e84d0fea5bdf3ede6784b8da6c501e0430c74430da3a125bd45c557e10a` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-server-linux-s390x.tar.gz) | `baccbb6fc497f433c2bd93146c31fbca1da427e0d6ac8483df26dd42ccb79c6e` + +### Node Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-node-linux-amd64.tar.gz) | `0cfdd51de879869e7ef40a17dfa1a303a596833fb567c3b7e4f82ba0cf863839` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-node-linux-arm64.tar.gz) | `d07ef669d94ea20a4a9e3a38868ac389dab4d3f2bdf8b27280724fe63f4de3c3` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-node-linux-arm.tar.gz) | `1cc9b6a8aee4e59967421cbded21c0a20f02c39288781f504e55ad6ca71d1037` 
+[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-node-linux-ppc64le.tar.gz) | `3f412096d8b249d671f924c3ee4aecf3656186fde4509ce9f560f67a9a166b6d` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-node-linux-s390x.tar.gz) | `2cca7629c1236b3435e6e31498c1f8216d7cca4236d8ad0ae10c83a422519a34` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.6.7/kubernetes-node-windows-amd64.tar.gz) | `4f859fba52c044a9ce703528760967e1efa47a359603b5466c0dc0748eb25e36` + +## Changelog since v1.6.6 + +### Other notable changes + +* kubeadm: Expose only the cluster-info ConfigMap in the kube-public ns ([#48050](https://github.com/kubernetes/kubernetes/pull/48050), [@luxas](https://github.com/luxas)) +* Fix kubelet request timeout when stopping a container. ([#46267](https://github.com/kubernetes/kubernetes/pull/46267), [@Random-Liu](https://github.com/Random-Liu)) +* Add generic NoSchedule toleration to fluentd in gcp config. ([#48182](https://github.com/kubernetes/kubernetes/pull/48182), [@gmarek](https://github.com/gmarek)) +* Update cluster-proportional-autoscaler, fluentd-gcp, and kube-addon-manager, and kube-dns addons with refreshed base images containing fixes for CVE-2016-9841, CVE-2016-9843, CVE-2017-2616, and CVE-2017-6512. ([#47454](https://github.com/kubernetes/kubernetes/pull/47454), [@ixdy](https://github.com/ixdy)) +* Fix fluentd-gcp configuration to facilitate JSON parsing ([#48139](https://github.com/kubernetes/kubernetes/pull/48139), [@crassirostris](https://github.com/crassirostris)) +* Bump runc to v1.0.0-rc2-49-gd223e2a - fixes `failed to initialise top level QOS containers` kubelet error. ([#48117](https://github.com/kubernetes/kubernetes/pull/48117), [@sjenning](https://github.com/sjenning)) +* `kubefed init` correctly checks for RBAC API enablement. 
([#48077](https://github.com/kubernetes/kubernetes/pull/48077), [@liggitt](https://github.com/liggitt)) +* `kubectl api-versions` now always fetches information about enabled API groups and versions instead of using the local cache. ([#48016](https://github.com/kubernetes/kubernetes/pull/48016), [@liggitt](https://github.com/liggitt)) +* Fix kubelet event recording for selected events. ([#46246](https://github.com/kubernetes/kubernetes/pull/46246), [@derekwaynecarr](https://github.com/derekwaynecarr)) +* Fix `Invalid value: "foregroundDeletion"` error when attempting to delete a resource. ([#46500](https://github.com/kubernetes/kubernetes/pull/46500), [@tnozicka](https://github.com/tnozicka)) + + + +# v1.7.0 + +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.7/examples) + +## Downloads for v1.7.0 + + +filename | sha256 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes.tar.gz) | `947f1dd9a9b6b427faac84067a30c86e83e6391eb42f09ddcc50a8694765c31a` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-src.tar.gz) | `d3d8b0bfc31164dd703b38d8484cfed7981cacd1e496731880afa87f8bf39aac` + +### Client Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-client-darwin-386.tar.gz) | `da298e24318e57ac8a558c390117bd7e9e596b3bdf1c5960979898fefe6c5c88` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-client-darwin-amd64.tar.gz) | `c22f72e1592731155db5b05d0d660f1d7314288cb020f7980e2a109d9e7ba0e5` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-client-linux-386.tar.gz) | `fc8e90e96360c3a2c8ec56903ab5acde1dffa4d641e1ee27b804ee6d8e824cf6` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-client-linux-amd64.tar.gz) | `8b3ed03f8a4b3a1ec124abde01632ee6dcec9daf9376f0288fd7500b5173981c` 
+[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-client-linux-arm64.tar.gz) | `8930c74dab9ada31e6994f0dc3fb22d41a602a2880b6b17112718ce73eac0574` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-client-linux-arm.tar.gz) | `20a6f4645cab3c0aef72f849ae90b2691605fd3f670ce36cc8aa11aef31c6edb` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-client-linux-ppc64le.tar.gz) | `509e214d55e8df1906894cbdc166e791761a3b82a52bcea0de65ceca3143c8b5` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-client-linux-s390x.tar.gz) | `fd39f47b691fc608f2ea3fed35408dd4c0b1d198605ec17363b0987b123a4702` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-client-windows-386.tar.gz) | `d9b72cfeefee0cd2db5f6a388bdb9da1e33514498f4d88be1b04282db5bfbd3d` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-client-windows-amd64.tar.gz) | `c536952bd29a7ae12c8fa148d592cc3c353dea4d0079e8497edaf8a759a16006` + +### Server Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-server-linux-amd64.tar.gz) | `175fc9360d4f26b5f60b467798d851061f01d0ca555c254ef44a8a9822cf7560` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-server-linux-arm64.tar.gz) | `f1e039e0e2923d1ea02fd76453aa51715ca83c5c26ca1a761ace2c717b79154f` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-server-linux-arm.tar.gz) | `48dc95e5230d7a44b64b379f9cf2e1ec72b7c4c7c62f4f3e92a73076ad6376db` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-server-linux-ppc64le.tar.gz) | `dc079cd18333c201cfd0f5b0e93e602d020a9e665d8c13968170a2cd89eebeb4` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-server-linux-s390x.tar.gz) | `fe6674e7d69aeffd522e543e957897e2cb943e82d5ccd368ccb9009e1128273f` + +### Node Binaries + 
+filename | sha256 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-node-linux-amd64.tar.gz) | `6c6cece62bad5bfeaf4a4b14e93c9ba99c96dc82b7855a2214cdf37a65251de8` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-node-linux-arm64.tar.gz) | `dd75dc044fb1f337b60cb4b27c9bbdca4742d8bc0a1d03d13553a1b8fc593e98` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-node-linux-arm.tar.gz) | `c5d832c93c24d77414a880d8b7c4fac9a7443305e8e5c704f637ff023ff56f94` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-node-linux-ppc64le.tar.gz) | `649813a257353c5b85605869e33aeeb0c070e64e6fee18bc9c6e70472aa05677` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-node-linux-s390x.tar.gz) | `5ca0a7e9e90b2de7aff7bbdc84f662140ce847ea46cdb78802ce75459e0cc043` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.7.0/kubernetes-node-windows-amd64.tar.gz) | `4b84b0025aff1d4406f3e5cd5fa86940f594e3ec6e1d12d3ce1eea5f5b3fc55d` + +## **Major Themes** + +Kubernetes 1.7 is a milestone release that adds security, stateful application, and extensibility features motivated by widespread production use of Kubernetes. + +Security enhancements in this release include encrypted secrets (alpha), network policy for pod-to-pod communication, the node authorizer to limit Kubelet access to API resources, and Kubelet client / server TLS certificate rotation (alpha). + +Major features for stateful applications include automated updates to StatefulSets, enhanced updates for DaemonSets, a burst mode for faster StatefulSets scaling, and (alpha) support for local storage. + +Extensibility features include API aggregation (beta), CustomResourceDefinitions (beta) in favor of ThirdPartyResources, support for extensible admission controllers (alpha), pluggable cloud providers (alpha), and container runtime interface (CRI) enhancements. 
+ +## **Action Required Before Upgrading** + +### Network + +* NetworkPolicy has been promoted from extensions/v1beta1 to the new networking.k8s.io/v1 API group. The structure remains unchanged from the v1beta1 API. The net.beta.kubernetes.io/network-policy annotation on Namespaces (used to opt in to isolation) has been removed. Instead, isolation is now determined on a per-pod basis. A NetworkPolicy may target a pod for isolation by including the pod in its spec.podSelector. Targeted Pods accept the traffic specified in the respective NetworkPolicy (and nothing else). Pods not targeted by any NetworkPolicy accept all traffic by default. ([#39164](https://github.com/kubernetes/kubernetes/pull/39164), [@danwinship](https://github.com/danwinship)) + + **Action Required:** When upgrading to Kubernetes 1.7 (and a [network plugin](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy/) that supports the new NetworkPolicy v1 semantics), you should consider the following. + + The v1beta1 API used an annotation on Namespaces to activate the DefaultDeny policy for an entire Namespace. To activate default deny in the v1 API, you can create a NetworkPolicy that matches all Pods but does not allow any traffic: + + ```yaml + kind: NetworkPolicy + apiVersion: networking.k8s.io/v1 + metadata: + name: default-deny + spec: + podSelector: + ``` + + This will ensure that Pods that aren't matched by any other NetworkPolicy will continue to be fully-isolated, as they were in v1beta1. + + In Namespaces that previously did not have the "DefaultDeny" annotation, you should delete any existing NetworkPolicy objects. These had no effect in the v1beta1 API, but with v1 semantics they might cause some traffic to be unintentionally blocked. + + +### Storage + +* Alpha volume provisioning is removed and default storage class should be used instead. 
([#44090](https://github.com/kubernetes/kubernetes/pull/44090), [@NickrenREN](https://github.com/NickrenREN)) + +* Portworx volume driver no longer has to run on the master. ([#45518](https://github.com/kubernetes/kubernetes/pull/45518), [@harsh-px](https://github.com/harsh-px)) + +* Default behavior in Cinder storageclass is changed. If availability is not specified, the zone is chosen by algorithm. It makes possible to spread stateful pods across many zones. ([#44798](https://github.com/kubernetes/kubernetes/pull/44798), [@zetaab](https://github.com/zetaab)) + +* PodSpecs containing parent directory references such as `..` (for example, `../bar`) in hostPath volume path or in volumeMount subpaths must be changed to the simple absolute path. Backsteps `..` are no longer allowed.([#47290](https://github.com/kubernetes/kubernetes/pull/47290), [@jhorwit2](https://github.com/jhorwit2)). + + +### API Machinery + +* The Namespace API object no longer supports the deletecollection operation. ([#46407](https://github.com/kubernetes/kubernetes/pull/46407), [@liggitt](https://github.com/liggitt)) + +* The following alpha API groups were unintentionally enabled by default in previous releases, and will no longer be enabled by default in v1.8: ([#47690](https://github.com/kubernetes/kubernetes/pull/47690), [@caesarxuchao](https://github.com/caesarxuchao)) + + * rbac.authorization.k8s.io/v1alpha1 + + * settings.k8s.io/v1alpha1 + + * If you wish to continue using them in v1.8, please enable them explicitly using the `--runtime-config` flag on the apiserver (for example, `--runtime-config="rbac.authorization.k8s.io/v1alpha1,settings.k8s.io/v1alpha1"`) + +* `cluster/update-storage-objects.sh` now supports updating StorageClasses in etcd to storage.k8s.io/v1. You must do this prior to upgrading to 1.8. 
([#46116](https://github.com/kubernetes/kubernetes/pull/46116), [@ncdc](https://github.com/ncdc)) + + +### Controller Manager + +* kube-controller-manager has dropped support for the `--insecure-experimental-approve-all-kubelet-csrs-for-group` flag. It is accepted in 1.7, but ignored. Instead, the csrapproving controller uses authorization checks to determine whether to approve certificate signing requests: ([#45619](https://github.com/kubernetes/kubernetes/pull/45619), [@mikedanese](https://github.com/mikedanese)) + + * Before upgrading, users must ensure their controller manager will enable the csrapproving controller, create an RBAC ClusterRole and ClusterRoleBinding to approve CSRs for the same group, then upgrade. Example roles to enable the equivalent behavior can be found in the [TLS bootstrapping](https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/) documentation. + + +### kubectl (CLI) +* `kubectl create role` and `kubectl create clusterrole` invocations must be updated to specify multiple resource names as repeated `--resource-name` arguments instead of comma-separated arguments to a single `--resource-name` argument. E.g. `--resource-name=x,y` must become `--resource-name x --resource-name y` ([#44950](https://github.com/kubernetes/kubernetes/pull/44950), [@xilabao](https://github.com/xilabao)) + +* `kubectl create rolebinding` and `kubectl create clusterrolebinding` invocations must be updated to specify multiple subjects as repeated `--user`, `--group`, or `--serviceaccount` arguments instead of comma-separated arguments to a single `--user`, `--group`, or `--serviceaccount`. E.g. `--user=x,y` must become `--user x --user y` ([#43903](https://github.com/kubernetes/kubernetes/pull/43903), [@xilabao](https://github.com/xilabao)) + + +### kubeadm + +* kubeadm: Modifications to cluster-internal resources installed by kubeadm will be overwritten when upgrading from v1.6 to v1.7. 
([#47081](https://github.com/kubernetes/kubernetes/pull/47081), [@luxas](https://github.com/luxas)) + +* kubeadm deb/rpm packages: cAdvisor doesn't listen on `0.0.0.0:4194` without authentication/authorization because of the possible information leakage. The cAdvisor API can still be accessed via `https://{node-ip}:10250/stats/`, though. ([kubernetes/release#356](https://github.com/kubernetes/release/pull/356), [@luxas](https://github.com/luxas)) + + +### Cloud Providers + +* Azure: Container permissions for provisioned volumes have changed to private. If you have existing Azure volumes that were created by Kubernetes v1.6.0-v1.6.5, you should change the permissions on them manually. ([#47605](https://github.com/kubernetes/kubernetes/pull/47605), [@brendandburns](https://github.com/brendandburns)) + +* GKE/GCE: New and upgraded 1.7 GCE/GKE clusters no longer have an RBAC ClusterRoleBinding that grants the cluster-admin ClusterRole to the default service account in the kube-system Namespace. ([#46750](https://github.com/kubernetes/kubernetes/pull/46750), [@cjcullen](https://github.com/cjcullen)). If this permission is still desired, run the following command to explicitly grant it, either before or after upgrading to 1.7: + ``` + kubectl create clusterrolebinding kube-system-default --serviceaccount=kube-system:default --clusterrole=cluster-admin + ``` + +## **Known Issues** + +Populated via [v1.7.x known issues / FAQ accumulator](https://github.com/kubernetes/kubernetes/issues/46733) + +* The kube-apiserver discovery APIs (for example, `/apis`) return information about the API groups being served, and can change dynamically. +During server startup, prior to the server reporting healthy (via `/healthz`), not all API groups may be reported. +Wait for the server to report healthy (via `/healthz`) before depending on the information provided by the discovery APIs. 
+Additionally, since the information returned from the discovery APIs may change dynamically, a cache of the results should not be considered authoritative. +ETag support is planned in a future version to facilitate client caching. +([#47977](https://github.com/kubernetes/kubernetes/pull/47977), [#44957](https://github.com/kubernetes/kubernetes/pull/44957)) + +* The DaemonSet controller will evict running Pods that do not tolerate the NoSchedule taint if the taint is added to a Node. There is an open PR ([#48189](https://github.com/kubernetes/kubernetes/pull/48189)) to resolve this issue, but as this issue also exists in 1.6, and as we do not wish to risk release stability by merging it directly prior to a release without sufficient testing, we have decided to defer merging the PR until the next point release for each minor version ([#48190](https://github.com/kubernetes/kubernetes/pull/48190)). + +* Protobuf serialization does not distinguish between `[]` and `null`. +API fields previously capable of storing and returning either `[]` or `null` via JSON API requests (for example, the Endpoints `subsets` field) +can now store only `null` when created using the protobuf content-type or stored in etcd using protobuf serialization (the default in 1.6). +JSON API clients should tolerate `null` values for such fields, and treat `null` and `[]` as equivalent in meaning unless specifically documented otherwise for a particular field. ([#44593](https://github.com/kubernetes/kubernetes/pull/44593)) + +* Local volume source paths that are directories and not mount points fail to unmount. A fix is in process ([#48331](https://github.com/kubernetes/kubernetes/issues/48331)). + +* Services of type LoadBalancer (on GCE/GKE) that have static IP addresses will cause the Service Controller to panic, thereby causing the kube-controller-manager to crash loop. 
+([#48848](https://github.com/kubernetes/kubernetes/issues/48848)) + +## **Deprecations** + +### Cluster provisioning scripts +* cluster/ubuntu: Removed due to [deprecation](https://github.com/kubernetes/kubernetes/tree/master/cluster#cluster-configuration) and lack of maintenance. ([#44344](https://github.com/kubernetes/kubernetes/pull/44344), [@mikedanese](https://github.com/mikedanese)) + +* cluster/aws: Removed due to [deprecation](https://github.com/kubernetes/kubernetes/pull/38772) and lack of maintenance. ([#42196](https://github.com/kubernetes/kubernetes/pull/42196), [@zmerlynn](https://github.com/zmerlynn)) + + +### Client libraries +* Swagger 1.2 spec (`/swaggerapi/*`) is deprecated. Please use OpenAPI instead. + +### DaemonSet +* DaemonSet’s spec.templateGeneration has been deprecated. ([#45924](https://github.com/kubernetes/kubernetes/pull/45924), [@janetkuo](https://github.com/janetkuo)) + +### kube-proxy +* In 1.7, the kube-proxy component has been converted to use a configuration file. The old flags still work in 1.7, but they are being deprecated and will be removed in a future release. Cluster administrators are advised to switch to using the configuration file, but no action is strictly necessary in 1.7. ([#34727](https://github.com/kubernetes/kubernetes/pull/34727), [@ncdc](https://github.com/ncdc)) + +### Namespace +* The Namespace API object no longer supports the deletecollection operation. ([#46407](https://github.com/kubernetes/kubernetes/pull/46407), [@liggitt](https://github.com/liggitt)) + + +### Scheduling +* If you are using `AffinityInAnnotations=true` in `--feature-gates`, then the 1.7 release is your last opportunity to convert from specifying affinity/anti-affinity using the scheduler.alpha.kubernetes.io/affinity annotation on Pods, to using the Affinity field of PodSpec. 
Support for the alpha version of node and pod affinity (which uses the scheduler.alpha.kubernetes.io/affinity annotations on Pods) is going away **in Kubernetes 1.8** (not this release, but the next release). If you have not enabled AffinityInAnnotations=true in `--feature-gates`, then this change does not affect you. + +## **Notable Features** + +Features for this release were tracked via the use of the [kubernetes/features](https://github.com/kubernetes/features) issues repo. Each Feature issue is owned by a Special Interest Group from [kubernetes/community](https://github.com/kubernetes/community) + +## Kubefed + +* Deprecate the `--secret-name` flag from `kubefed join`, instead generating the secret name arbitrarily. ([#42513](https://github.com/kubernetes/kubernetes/pull/42513), [@perotinus](https://github.com/perotinus)) + + +### **Kubernetes API** +#### User Provided Extensions +* [beta] ThirdPartyResource is deprecated. Please migrate to the successor, CustomResourceDefinition. For more information, see [Custom Resources](https://kubernetes.io/docs/concepts/api-extension/custom-resources/) and [Migrate a ThirdPartyResource to CustomResourceDefinition](https://kubernetes.io/docs/tasks/access-kubernetes-api/migrate-third-party-resource/). + +* [beta] User-provided apiservers can be aggregated (served along with) the rest of the Kubernetes API. See [Extending the Kubernetes API with the aggregation layer](https://kubernetes.io/docs/concepts/api-extension/apiserver-aggregation/), [Configure the aggregation layer](https://kubernetes.io/docs/tasks/access-kubernetes-api/configure-aggregation-layer/), and [Setup an extension API server](https://kubernetes.io/docs/tasks/access-kubernetes-api/setup-extension-api-server/). + +* [alpha] Adding admissionregistration API group which enables dynamic registration of initializers and external admission webhooks. 
([#46294](https://github.com/kubernetes/kubernetes/pull/46294), [@caesarxuchao](https://github.com/caesarxuchao)) + + +### **Application Deployment** +#### StatefulSet +* [beta] StatefulSet supports [RollingUpdate](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#rolling-updates) and [OnDelete](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#on-delete) update strategies. + +* [alpha] StatefulSet authors should be able to relax the [ordering](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#orderedready-pod-management) and [parallelism](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#parallel-pod-management) policies for software that can safely support rapid, out-of-order changes. + +#### DaemonSet +* [beta] DaemonSet supports history and rollback. See [Performing a Rollback on a DaemonSet](https://kubernetes.io/docs/tasks/manage-daemon/rollback-daemon-set/). + +#### Deployments +* [beta] Deployments uses a hashing collision avoidance mechanism that ensures new rollouts will not block on hashing collisions anymore. ([kubernetes/features#287](https://github.com/kubernetes/features/issues/287)) + +#### PodDisruptionBudget +* [beta] PodDisruptionBudget has a new field MaxUnavailable, which allows users to specify the maximum number of disruptions that can be tolerated during eviction. For more information, see [Pod Disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) and [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/). +* PodDisruptionBudget now uses [ControllerRef](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/controller-ref.md) to make the right decisions about Pod eviction even if the built in application controllers have overlapping selectors. 
+ +### **Security** +#### Admission Control +* [alpha] Add [extensible external admission control](https://kubernetes.io/docs/admin/extensible-admission-controllers/). + +#### TLS Bootstrapping +* [alpha] Rotation of the server TLS certificate on the kubelet. See [TLS bootstrapping - approval controller](https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#approval-controller). + +* [alpha] Rotation of the client TLS certificate on the kubelet. See [TLS bootstrapping - kubelet configuration](https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#kubelet-configuration). + +* [beta] [Kubelet TLS Bootstrap](https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#kubelet-configuration) + +#### Audit Logging +* [alpha] Advanced Auditing enhances the Kubernetes API [audit logging](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-logs) capabilities through a customizable policy, pluggable audit backends, and richer audit data. + +#### Encryption at Rest +* [alpha] Encrypt secrets stored in etcd. For more information, see [Securing a Cluster](https://kubernetes.io/docs/tasks/administer-cluster/securing-a-cluster/) and [Encrypting data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/). + +#### Node Authorization +* [beta] A new Node authorization mode and NodeRestriction admission plugin, when used in combination, limit nodes' access to specific APIs, so that they may only modify their own Node API object, only modify Pod objects bound to themselves, and only retrieve secrets and configmaps referenced by pods bound to themselves. See [Using Node Authorization](https://kubernetes.io/docs/admin/authorization/node/) for more information. + + +### **Application Autoscaling** +#### Horizontal Pod Autoscaler +* [alpha] [HPA Status Conditions](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#appendix-horizontal-pod-autoscaler-status-conditions). 
+ + +### **Cluster Lifecycle** +#### kubeadm +* [alpha] Manual [upgrades for kubeadm from v1.6 to v1.7](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm-upgrade-1-7/). Automated upgrades ([kubernetes/features#296](https://github.com/kubernetes/features/issues/296)) are targeted for v1.8. + +#### Cloud Provider Support +* [alpha] Improved support for out-of-tree and out-of-process cloud providers, a.k.a pluggable cloud providers. See [Build and Run cloud-controller-manager](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller) documentation. + + +### **Cluster Federation** +#### Placement Policy +* [alpha] The federation-apiserver now supports a SchedulingPolicy admission controller that enables policy-based control over placement of federated resources. For more information, see [Set up placement policies in Federation](https://kubernetes.io/docs/tasks/federation/set-up-placement-policies-federation/). + +#### Cluster Selection +* [alpha] Federation [ClusterSelector annotation](https://kubernetes.io/docs/tasks/administer-federation/cluster/#clusterselector-annotation) to direct objects to federated clusters with matching labels. 
+ + +### **Instrumentation** +#### Core Metrics API +* [alpha] Introduces a lightweight monitoring component for serving the core resource metrics API used by the Horizontal Pod Autoscaler and other components ([kubernetes/features#271](https://github.com/kubernetes/features/issues/271)) + + +### **Internationalization** + +* Add Traditional Chinese translation for kubectl ([#46559](https://github.com/kubernetes/kubernetes/pull/46559), [@warmchang](https://github.com/warmchang)) + +* Add Japanese translation for kubectl ([#46756](https://github.com/kubernetes/kubernetes/pull/46756), [@girikuncoro](https://github.com/girikuncoro)) + +* Add Simplified Chinese translation for kubectl ([#45573](https://github.com/kubernetes/kubernetes/pull/45573), [@shiywang](https://github.com/shiywang)) + +### **kubectl (CLI)** +* Features + + * `kubectl logs` supports specifying a container name when using label selectors ([#44282](https://github.com/kubernetes/kubernetes/pull/44282), [@derekwaynecarr](https://github.com/derekwaynecarr)) + + * `kubectl rollout` supports undo and history for DaemonSet ([#46144](https://github.com/kubernetes/kubernetes/pull/46144), [@janetkuo](https://github.com/janetkuo)) + + * `kubectl rollout` supports status and history for StatefulSet ([#46669](https://github.com/kubernetes/kubernetes/pull/46669), [@kow3ns](https://github.com/kow3ns)). 
+ + * Implement `kubectl get controllerrevisions` ([#46655](https://github.com/kubernetes/kubernetes/pull/46655), [@janetkuo](https://github.com/janetkuo)) + + * `kubectl create clusterrole` supports `--non-resource-url` ([#45809](https://github.com/kubernetes/kubernetes/pull/45809), [@CaoShuFeng](https://github.com/CaoShuFeng)) + + * `kubectl logs` and `kubectl attach` support specifying a wait timeout with `--pod-running-timeout` ([#41813](https://github.com/kubernetes/kubernetes/pull/41813), [@shiywang](https://github.com/shiywang)) + + * New commands + + * Add `kubectl config rename-context` ([#46114](https://github.com/kubernetes/kubernetes/pull/46114), [@arthur0](https://github.com/arthur0)) + + * Add `kubectl apply edit-last-applied` subcommand ([#42256](https://github.com/kubernetes/kubernetes/pull/42256), [@shiywang](https://github.com/shiywang)) + + * Strategic Merge Patch + + * Reference docs now display the patch type and patch merge key used by `kubectl apply` to merge and identify unique elements in arrays. + + * `kubectl edit` and `kubectl apply` will keep the ordering of elements in merged lists ([#45980](https://github.com/kubernetes/kubernetes/pull/45980), [@mengqiy](https://github.com/mengqiy)) + + * New patch directive (retainKeys) to specify clearing of fields missing from the request ([#44597](https://github.com/kubernetes/kubernetes/pull/44597), [@mengqiy](https://github.com/mengqiy)) + + * Open API now includes strategic merge patch tags (previously only in go struct tags) ([#44121](https://github.com/kubernetes/kubernetes/pull/44121), [@mbohlool](https://github.com/mbohlool)) + + * Plugins + + * Introduces the ability to extend kubectl by adding third-party plugins. Developer preview, please refer to the documentation for instructions about how to use it. 
([#37499](https://github.com/kubernetes/kubernetes/pull/37499), [@fabianofranz](https://github.com/fabianofranz)) + + * Added support for a hierarchy of kubectl plugins (a tree of plugins as children of other plugins). ([#45981](https://github.com/kubernetes/kubernetes/pull/45981), [@fabianofranz](https://github.com/fabianofranz)) + + * Added exported env vars to kubectl plugins so that plugin developers have access to global flags, namespace, the plugin descriptor and the full path to the caller binary. + + * Enhancement + + * `kubectl auth can-i` now supports non-resource URLs ([#46432](https://github.com/kubernetes/kubernetes/pull/46432), [@CaoShuFeng](https://github.com/CaoShuFeng)) + + * `kubectl set selector` and `kubectl set subject` no longer print "running in local/dry-run mode..." at the top. The output can now be piped and interpreted as yaml or json ([#46507](https://github.com/kubernetes/kubernetes/pull/46507), [@bboreham](https://github.com/bboreham)) + + * When using an in-cluster client with an empty configuration, the `--namespace` flag is now honored ([#46299](https://github.com/kubernetes/kubernetes/pull/46299), [@ncdc](https://github.com/ncdc)) + + * The help message for missingResourceError is now generic ([#45582](https://github.com/kubernetes/kubernetes/pull/45582), [@CaoShuFeng](https://github.com/CaoShuFeng)) + + * `kubectl taint node` now supports label selectors ([#44740](https://github.com/kubernetes/kubernetes/pull/44740), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) + + * `kubectl proxy --www` now logs a warning when the dir is invalid ([#44952](https://github.com/kubernetes/kubernetes/pull/44952), [@CaoShuFeng](https://github.com/CaoShuFeng)) + + * `kubectl taint` output has been enhanced with the operation ([#43171](https://github.com/kubernetes/kubernetes/pull/43171), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) + + * kubectl `--user` and `--cluster` now support completion 
([#44251](https://github.com/kubernetes/kubernetes/pull/44251), [@superbrothers](https://github.com/superbrothers)) + + * `kubectl config use-context` now supports completion ([#42336](https://github.com/kubernetes/kubernetes/pull/42336), [@superbrothers](https://github.com/superbrothers)) + + * `kubectl version` now supports `--output` ([#39858](https://github.com/kubernetes/kubernetes/pull/39858), [@alejandroEsc](https://github.com/alejandroEsc)) + + * `kubectl create configmap` has a new option `--from-env-file` that populates a configmap from file which follows a key=val format for each line. ([#38882](https://github.com/kubernetes/kubernetes/pull/38882), [@fraenkel](https://github.com/fraenkel)) + + * `kubectl create secret` has a new option `--from-env-file` that populates a secret from file which follows a key=val format for each line. + + * Printing/describe + + * Print conditions of RC/RS in `kubectl describe` command. ([#44710](https://github.com/kubernetes/kubernetes/pull/44710), [@xiangpengzhao](https://github.com/xiangpengzhao)) + + * Improved output on `kubectl get` and `kubectl describe` for generic objects. ([#44222](https://github.com/kubernetes/kubernetes/pull/44222), [@fabianofranz](https://github.com/fabianofranz)) + + * In `kubectl describe`, find controllers with ControllerRef, instead of showing the original creator. ([#42849](https://github.com/kubernetes/kubernetes/pull/42849), [@janetkuo](https://github.com/janetkuo)) + + * `kubectl version` has new flag --output (=json or yaml) allowing result of the command to be parsed in either json format or yaml. ([#39858](https://github.com/kubernetes/kubernetes/pull/39858), [@alejandroEsc](https://github.com/alejandroEsc)) + + + * Bug fixes + + * Fix some false negatives in detection of meaningful conflicts during strategic merge patch with maps and lists. 
([#43469](https://github.com/kubernetes/kubernetes/pull/43469), [@enisoc](https://github.com/enisoc)) + + * Fix false positive "meaningful conflict" detection for strategic merge patch with integer values. ([#44788](https://github.com/kubernetes/kubernetes/pull/44788), [@enisoc](https://github.com/enisoc)) + + * Restored the ability of kubectl running inside a pod to consume resource files specifying a different namespace than the one the pod is running in. ([#44862](https://github.com/kubernetes/kubernetes/pull/44862), [@liggitt](https://github.com/liggitt)) + + * Kubectl commands run inside a pod using a kubeconfig file now use the namespace specified in the kubeconfig file, instead of using the pod namespace. If no kubeconfig file is used, or the kubeconfig does not specify a namespace, the pod namespace is still used as a fallback. ([#44570](https://github.com/kubernetes/kubernetes/pull/44570), [@liggitt](https://github.com/liggitt)) + + * Fixed `kubectl cluster-info` dump to support multi-container pod. ([#44088](https://github.com/kubernetes/kubernetes/pull/44088), [@xingzhou](https://github.com/xingzhou)) + + * Kubectl will print a warning when deleting the current context ([#42538](https://github.com/kubernetes/kubernetes/pull/42538), [@adohe](https://github.com/adohe)) + + * Fix VolumeClaims/capacity in `kubectl describe statefulsets` output. ([#47573](https://github.com/kubernetes/kubernetes/pull/47573), [@k82cn](https://github.com/k82cn)) + + * Fixed the output of kubectl taint node command with minor improvements. ([#43171](https://github.com/kubernetes/kubernetes/pull/43171), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) + + +### **Networking** +#### Network Policy +* [stable] [NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) promoted to GA. 
+ * Additionally adds short name "netpol" for networkpolicies ([#42241](https://github.com/kubernetes/kubernetes/pull/42241), [@xiangpengzhao](https://github.com/xiangpengzhao)) + + +#### Load Balancing +* [stable] Source IP Preservation - change Cloud load-balancer strategy to health-checks and respond to health check only on nodes that host pods for the service. See [Create an External Load Balancer - Preserving the client source IP](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip). + Two annotations have been promoted to API fields: + + * Service.Spec.ExternalTrafficPolicy was 'service.beta.kubernetes.io/external-traffic' annotation. + + * Service.Spec.HealthCheckNodePort was 'service.beta.kubernetes.io/healthcheck-nodeport' annotation. + +### **Node Components** +#### Container Runtime Interface +* [alpha] CRI validation testing, which provides a test framework and a suite of tests to validate that the CRI server implementation meets all the requirements. This allows the CRI runtime developers to verify that their runtime conforms to CRI, without needing to set up Kubernetes components or run Kubernetes end-to-end tests. ([docs](https://github.com/kubernetes/community/blob/master/contributors/devel/cri-validation.md) and [release notes](https://github.com/kubernetes-incubator/cri-tools/releases/tag/v0.1)) ([kubernetes/features#292](https://github.com/kubernetes/features/issues/292)) + +* [alpha] Adds support of container metrics in CRI ([docs PR](https://github.com/kubernetes/community/pull/742)) ([kubernetes/features#290](https://github.com/kubernetes/features/issues/290)) + +* [alpha] Integration with [containerd](https://github.com/containerd/containerd), which supports basic pod lifecycle and image management. 
([docs](https://github.com/kubernetes-incubator/cri-containerd/blob/master/README.md) and [release notes](https://github.com/kubernetes-incubator/cri-containerd/releases/tag/v0.1.0)) ([kubernetes/features#286](https://github.com/kubernetes/features/issues/286)) + +* [GA] The Docker-CRI implementation is GA. The legacy, non-CRI Docker integration has been completely removed. + +* [beta] [CRI-O](https://github.com/kubernetes-incubator/cri-o) v1.0.0-alpha.0. It has passed all e2e tests. ([release notes](https://github.com/kubernetes-incubator/cri-o/releases/tag/v1.0.0-alpha.0)) + +* [beta] [Frakti](https://github.com/kubernetes/frakti) v1.0. It has passed all node conformance tests. ([release notes](https://github.com/kubernetes/frakti/releases/tag/v1.0)) + + + +### **Scheduling** +#### Scheduler Extender +* [alpha] Support for delegating pod binding to a scheduler extender ([kubernetes/features#270](https://github.com/kubernetes/features/issues/270)) + +### **Storage** +#### Local Storage +* [alpha] This feature adds capacity isolation support for local storage at node, container, and volume levels. See updated [Reserve Compute Resources for System Daemons](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/) documentation. + +* [alpha] Make locally attached (non-network attached) storage available as a persistent volume source. For more information, see [Storage Volumes - local](https://kubernetes.io/docs/concepts/storage/volumes/#local). + +#### Volume Plugins +* [stable] Volume plugin for StorageOS provides highly-available cluster-wide persistent volumes from local or attached node storage. See [Persistent Volumes - StorageOS](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#storageos) and [Storage Volumes - StorageOS](https://kubernetes.io/docs/concepts/storage/volumes/#storageos). + +#### Metrics +* [stable] Add support for cloudprovider metrics for storage API calls. 
See [Controller manager metrics](https://kubernetes.io/docs/concepts/cluster-administration/controller-metrics/) for more information. + +### **Other notable changes** + +#### Admission plugin +* OwnerReferencesPermissionEnforcement admission plugin ignores pods/status. ([#45747](https://github.com/kubernetes/kubernetes/pull/45747), [@derekwaynecarr](https://github.com/derekwaynecarr)) + + +* Ignored mirror pods in PodPreset admission plugin. ([#45958](https://github.com/kubernetes/kubernetes/pull/45958), [@k82cn](https://github.com/k82cn)) + +#### API Machinery +* The protobuf serialization of API objects has been updated to store maps in a predictable order to ensure that the representation of that object does not change when saved into etcd. This prevents the same object from being seen as being modified, even when no values have changed. ([#47701](https://github.com/kubernetes/kubernetes/pull/47701), [@smarterclayton](https://github.com/smarterclayton)) + +* API resource discovery now includes the singularName used to refer to the resource. ([#43312](https://github.com/kubernetes/kubernetes/pull/43312), [@deads2k](https://github.com/deads2k)) + +* Enhance the garbage collection admission plugin so that a user who doesn't have delete permission of the owning object cannot modify the blockOwnerDeletion field of existing ownerReferences, or add new ownerReferences with blockOwnerDeletion=true ([#43876](https://github.com/kubernetes/kubernetes/pull/43876), [@caesarxuchao](https://github.com/caesarxuchao)) + +* Exec and portforward actions over SPDY now properly handle redirects sent by the Kubelet ([#44451](https://github.com/kubernetes/kubernetes/pull/44451), [@ncdc](https://github.com/ncdc)) + +* The proxy subresource APIs for nodes, services, and pods now support the HTTP PATCH method. 
([#44929](https://github.com/kubernetes/kubernetes/pull/44929), [@liggitt](https://github.com/liggitt)) + +* The Categories []string field on discovered API resources represents the list of group aliases (e.g. "all") that each resource belongs to. ([#43338](https://github.com/kubernetes/kubernetes/pull/43338), [@fabianofranz](https://github.com/fabianofranz)) + +* [alpha] The Kubernetes API supports retrieving tabular output for API resources via a new mime-type application/json;as=Table;v=v1alpha1;g=meta.k8s.io. The returned object (if the server supports it) will be of type meta.k8s.io/v1alpha1 with Table, and contain column and row information related to the resource. Each row will contain information about the resource - by default it will be the object metadata, but callers can add the ?includeObject=Object query parameter and receive the full object. In the future kubectl will use this to retrieve the results of `kubectl get`. ([#40848](https://github.com/kubernetes/kubernetes/pull/40848), [@smarterclayton](https://github.com/smarterclayton)) + +* The behavior of some watch calls to the server when filtering on fields was incorrect. If watching objects with a filter, when an update was made that no longer matched the filter a DELETE event was correctly sent. However, the object that was returned by that delete was not the (correct) version before the update, but instead, the newer version. That meant the new object was not matched by the filter. This was a regression from behavior between cached watches on the server side and uncached watches, and thus broke downstream API clients. ([#46223](https://github.com/kubernetes/kubernetes/pull/46223), [@smarterclayton](https://github.com/smarterclayton)) + +* OpenAPI spec is now available in protobuf binary and gzip format (with ETag support) ([#45836](https://github.com/kubernetes/kubernetes/pull/45836), [@mbohlool](https://github.com/mbohlool)) + +* Updating apiserver to return UID of the deleted resource. 
Clients can use this UID to verify that the resource was deleted or waiting for finalizers. ([#45600](https://github.com/kubernetes/kubernetes/pull/45600), [@nikhiljindal](https://github.com/nikhiljindal)) + +* Fix incorrect conflict errors applying strategic merge patches to resources. ([#43871](https://github.com/kubernetes/kubernetes/pull/43871), [@liggitt](https://github.com/liggitt)) + +* Fix init container status reporting when active deadline is exceeded. ([#46305](https://github.com/kubernetes/kubernetes/pull/46305), [@sjenning](https://github.com/sjenning)) + +* Moved qos to api.helpers. ([#44906](https://github.com/kubernetes/kubernetes/pull/44906), [@k82cn](https://github.com/k82cn)) + +* Fix issue with the resource quota controller causing add quota to be resynced at the wrong ([#45685](https://github.com/kubernetes/kubernetes/pull/45685), [@derekwaynecarr](https://github.com/derekwaynecarr)) + +* Added Group/Version/Kind and Action extension to OpenAPI Operations ([#44787](https://github.com/kubernetes/kubernetes/pull/44787), [@mbohlool](https://github.com/mbohlool)) + +* Make clear that meta.KindToResource is only a guess ([#45272](https://github.com/kubernetes/kubernetes/pull/45272), [@sttts](https://github.com/sttts)) + +* Add APIService conditions ([#43301](https://github.com/kubernetes/kubernetes/pull/43301), [@deads2k](https://github.com/deads2k)) + +* Create and push a docker image for the cloud-controller-manager ([#45154](https://github.com/kubernetes/kubernetes/pull/45154), [@luxas](https://github.com/luxas)) + +* Deprecated Binding objects in 1.7. ([#47041](https://github.com/kubernetes/kubernetes/pull/47041), [@k82cn](https://github.com/k82cn)) + +* Adds the Categories []string field to API resources, which represents the list of group aliases (e.g. "all") that every resource belongs to. 
([#43338](https://github.com/kubernetes/kubernetes/pull/43338), [@fabianofranz](https://github.com/fabianofranz)) + +* `--service-account-lookup` now defaults to true, requiring the Secret API object containing the token to exist in order for a service account token to be valid. This enables service account tokens to be revoked by deleting the Secret object containing the token. ([#44071](https://github.com/kubernetes/kubernetes/pull/44071), [@liggitt](https://github.com/liggitt)) + +* API Registration is now in beta. ([#45247](https://github.com/kubernetes/kubernetes/pull/45247), [@mbohlool](https://github.com/mbohlool)) + +* The Kubernetes API server now exits if it encounters a networking failure (e.g. the networking interface hosting its address goes away) to allow a process manager (systemd/kubelet/etc) to react to the problem. Previously the server would log the failure and try again to bind to its configured address:port. ([#42272](https://github.com/kubernetes/kubernetes/pull/42272), [@marun](https://github.com/marun)) + +* The Prometheus metrics for the kube-apiserver for tracking incoming API requests and latencies now return the subresource label for correctly attributing the type of API call. ([#46354](https://github.com/kubernetes/kubernetes/pull/46354), [@smarterclayton](https://github.com/smarterclayton)) + +* kube-apiserver now drops unneeded path information if an older version of Windows kubectl sends it. ([#44421](https://github.com/kubernetes/kubernetes/pull/44421), [@mml](https://github.com/mml)) + + +#### Application autoscaling +* Make "upscale forbidden window" and "downscale forbidden window" duration configurable in arguments of kube-controller-manager. ([#42101](https://github.com/kubernetes/kubernetes/pull/42101), [@Dmitry1987](https://github.com/Dmitry1987)) + +#### Application Deployment +* StatefulSetStatus now tracks replicas, readyReplicas, currentReplicas, and updatedReplicas. 
The semantics of replicas is now consistent with DaemonSet and ReplicaSet, and readyReplicas has the semantics that replicas did prior to 1.7 ([#46669](https://github.com/kubernetes/kubernetes/pull/46669), [@kow3ns](https://github.com/kow3ns)). + +* ControllerRevision type has been added for StatefulSet and DaemonSet history. Clients should not depend on the stability of this type as it may change, as necessary, in future releases to support StatefulSet and DaemonSet update and rollback. We enable this type as we do with beta features, because StatefulSet update and DaemonSet update are enabled. ([#45867](https://github.com/kubernetes/kubernetes/pull/45867), [@kow3ns](https://github.com/kow3ns)) + +* PodDisruptionBudget now uses ControllerRef to decide which controller owns a given Pod, so it doesn't get confused by controllers with overlapping selectors. ([#45003](https://github.com/kubernetes/kubernetes/pull/45003), [@krmayankk](https://github.com/krmayankk)) + +* Deployments are updated to use (1) a more stable hashing algorithm (fnv) than the previous one (adler) and (2) a hashing collision avoidance mechanism that will ensure new rollouts will not block on hashing collisions anymore. ([#44774](https://github.com/kubernetes/kubernetes/pull/44774), [@kargakis](https://github.com/kargakis))([kubernetes/features#287](https://github.com/kubernetes/features/issues/287)) + +* Deployments and DaemonSets rollouts are considered complete when all of the desired replicas are updated and available. This change affects `kubectl rollout status` and Deployment condition. ([#44672](https://github.com/kubernetes/kubernetes/pull/44672), [@kargakis](https://github.com/kargakis)) + +* Job controller now respects ControllerRef to avoid fighting over Pods. ([#42176](https://github.com/kubernetes/kubernetes/pull/42176), [@enisoc](https://github.com/enisoc)) + +* CronJob controller now respects ControllerRef to avoid fighting with other controllers. 
([#42177](https://github.com/kubernetes/kubernetes/pull/42177), [@enisoc](https://github.com/enisoc)) + +#### Cluster Autoscaling +* Cluster Autoscaler 0.6. More information available [here](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/README.md). + +* cluster-autoscaler: Fix duplicate writing of logs. ([#45017](https://github.com/kubernetes/kubernetes/pull/45017), [@MaciekPytel](https://github.com/MaciekPytel)) + + +#### Cloud Provider Enhancement + +* AWS: + + * New 'service.beta.kubernetes.io/aws-load-balancer-extra-security-groups' Service annotation to specify extra Security Groups to be added to ELB created by AWS cloudprovider ([#45268](https://github.com/kubernetes/kubernetes/pull/45268), [@redbaron](https://github.com/redbaron)) + + * Clean up blackhole routes when using kubenet ([#47572](https://github.com/kubernetes/kubernetes/pull/47572), [@justinsb](https://github.com/justinsb)) + + * Maintain a cache of all instances, to fix problem with > 200 nodes with ELBs ([#47410](https://github.com/kubernetes/kubernetes/pull/47410), [@justinsb](https://github.com/justinsb)) + + * Avoid spurious ELB listener recreation - ignore case when matching protocol ([#47391](https://github.com/kubernetes/kubernetes/pull/47391), [@justinsb](https://github.com/justinsb)) + + * Allow configuration of a single security group for ELBs ([#45500](https://github.com/kubernetes/kubernetes/pull/45500), [@nbutton23](https://github.com/nbutton23)) + + * Remove check that forces loadBalancerSourceRanges to be 0.0.0.0/0. ([#38636](https://github.com/kubernetes/kubernetes/pull/38636), [@dhawal55](https://github.com/dhawal55)) + + * Allow setting KubernetesClusterID or KubernetesClusterTag in combination with VPC. 
([#42512](https://github.com/kubernetes/kubernetes/pull/42512), [@scheeles](https://github.com/scheeles)) + + * Start recording cloud provider metrics for AWS ([#43477](https://github.com/kubernetes/kubernetes/pull/43477), [@gnufied](https://github.com/gnufied)) + + * AWS: Batch DescribeInstance calls with nodeNames to 150 limit, to stay within AWS filter limits. ([#47516](https://github.com/kubernetes/kubernetes/pull/47516), [@gnufied](https://github.com/gnufied)) + + * AWS: Process disk attachments even with duplicate NodeNames ([#47406](https://github.com/kubernetes/kubernetes/pull/47406), [@justinsb](https://github.com/justinsb)) + + * Allow configuration of a single security group for ELBs ([#45500](https://github.com/kubernetes/kubernetes/pull/45500), [@nbutton23](https://github.com/nbutton23)) + + * Fix support running the master with a different AWS account or even on a different cloud provider than the nodes. ([#44235](https://github.com/kubernetes/kubernetes/pull/44235), [@mrIncompetent](https://github.com/mrIncompetent)) + + * Support node port health check ([#43585](https://github.com/kubernetes/kubernetes/pull/43585), [@foolusion](https://github.com/foolusion)) + + * Support for ELB tagging by users ([#45932](https://github.com/kubernetes/kubernetes/pull/45932), [@lpabon](https://github.com/lpabon)) + +* Azure: + + * Add support for UDP ports ([#45523](https://github.com/kubernetes/kubernetes/pull/45523), [@colemickens](https://github.com/colemickens)) + + * Fix support for multiple loadBalancerSourceRanges ([#45523](https://github.com/kubernetes/kubernetes/pull/45523), [@colemickens](https://github.com/colemickens)) + + * Support the Service spec's sessionAffinity ([#45523](https://github.com/kubernetes/kubernetes/pull/45523), [@colemickens](https://github.com/colemickens)) + + * Added exponential backoff to Azure cloudprovider ([#46660](https://github.com/kubernetes/kubernetes/pull/46660), [@jackfrancis](https://github.com/jackfrancis)) + + * Add 
support for bring-your-own ip address for Services on Azure ([#42034](https://github.com/kubernetes/kubernetes/pull/42034), [@brendandburns](https://github.com/brendandburns)) + + * Add support for Azure internal load balancer ([#43510](https://github.com/kubernetes/kubernetes/pull/43510), [@karataliu](https://github.com/karataliu)) + + * Client poll duration is now 5 seconds ([#43699](https://github.com/kubernetes/kubernetes/pull/43699), [@colemickens](https://github.com/colemickens)) + + * Azure plugin for client auth ([#43987](https://github.com/kubernetes/kubernetes/pull/43987), [@cosmincojocar](https://github.com/cosmincojocar)) + + +* GCP: + + * Bump GLBC version to 0.9.5 - fixes [loss of manually modified GCLB health check settings](https://github.com/kubernetes/kubernetes/issues/47559) upon upgrade from pre-1.6.4 to either 1.6.4 or 1.6.5. ([#47567](https://github.com/kubernetes/kubernetes/pull/47567), [@nicksardo](https://github.com/nicksardo)) + + * [beta] Support creation of GCP Internal Load Balancers from Service objects ([#46663](https://github.com/kubernetes/kubernetes/pull/46663), [@nicksardo](https://github.com/nicksardo)) + + * GCE installs will now avoid IP masquerade for all RFC-1918 IP blocks, rather than just 10.0.0.0/8. This means that clusters can be created in 192.168.0.0/16 and 172.16.0.0/12 while preserving the container IPs (which would be lost before). ([#46473](https://github.com/kubernetes/kubernetes/pull/46473), [@thockin](https://github.com/thockin)) + + * The Calico version included in kube-up for GCE has been updated to v2.2. 
([#38169](https://github.com/kubernetes/kubernetes/pull/38169), [@caseydavenport](https://github.com/caseydavenport)) + + * ip-masq-agent is now on by default for GCE ([#47794](https://github.com/kubernetes/kubernetes/pull/47794), [@dnardo](https://github.com/dnardo)) + + * Add ip-masq-agent addon to the addons folder which is used in GCE if `--non-masquerade-cidr` is set to 0/0 ([#46038](https://github.com/kubernetes/kubernetes/pull/46038), [@dnardo](https://github.com/dnardo)) + + * Enable kubelet csr bootstrap in GCE/GKE ([#40760](https://github.com/kubernetes/kubernetes/pull/40760), [@mikedanese](https://github.com/mikedanese)) + + * Adds support for allocation of pod IPs via IP aliases. ([#42147](https://github.com/kubernetes/kubernetes/pull/42147), [@bowei](https://github.com/bowei)) + + * gce kube-up: The Node authorization mode and NodeRestriction admission controller are now enabled ([#46796](https://github.com/kubernetes/kubernetes/pull/46796), [@mikedanese](https://github.com/mikedanese)) + + * Tokens retrieved from Google Cloud with application default credentials will not be cached if the client fails authorization ([#46694](https://github.com/kubernetes/kubernetes/pull/46694), [@matt-tyler](https://github.com/matt-tyler)) + + * Add metrics to all major gce operations {latency, errors} ([#44510](https://github.com/kubernetes/kubernetes/pull/44510), [@bowei](https://github.com/bowei)) + + * The new metrics are: + + * cloudprovider_gce_api_request_duration_seconds{request, region, zone} + + * cloudprovider_gce_api_request_errors{request, region, zone} + + * request is the specific function that is used. + + * region is the target region (Will be "" if not applicable) + + * zone is the target zone (Will be "" if not applicable) + + * Note: this fixes some issues with the previous implementation of metrics for disks: + + * Time duration tracked was of the initial API call, not the entire operation. 
+ + * Metrics label tuple would have resulted in many independent histograms stored, one for each disk. (Did not aggregate well). + + * Fluentd now tolerates all NoExecute Taints when run in gcp configuration. ([#45715](https://github.com/kubernetes/kubernetes/pull/45715), [@gmarek](https://github.com/gmarek)) + + * Taints support in gce/salt startup scripts. ([#47632](https://github.com/kubernetes/kubernetes/pull/47632), [@mwielgus](https://github.com/mwielgus)) + + * GCE installs will now avoid IP masquerade for all RFC-1918 IP blocks, rather than just 10.0.0.0/8. This means that clusters can be created in 192.168.0.0/16 and 172.16.0.0/12 while preserving the container IPs (which would be lost before). ([#46473](https://github.com/kubernetes/kubernetes/pull/46473), [@thockin](https://github.com/thockin)) + + * Support running Ubuntu image on GCE node ([#44744](https://github.com/kubernetes/kubernetes/pull/44744), [@yguo0905](https://github.com/yguo0905)) + + * The gce metadata server can now be hidden behind a proxy, hiding the kubelet's token. ([#45565](https://github.com/kubernetes/kubernetes/pull/45565), [@Q-Lee](https://github.com/Q-Lee)) + +* OpenStack: + + * Fix issue during LB creation where ports were incorrectly assigned to a floating IP ([#44387](https://github.com/kubernetes/kubernetes/pull/44387), [@jamiehannaford](https://github.com/jamiehannaford)) + + * Openstack cinder v1/v2/auto API support ([#40423](https://github.com/kubernetes/kubernetes/pull/40423), [@mkutsevol](https://github.com/mkutsevol)) + + * OpenStack clusters can now specify whether worker nodes are assigned a floating IP ([#42638](https://github.com/kubernetes/kubernetes/pull/42638), [@jamiehannaford](https://github.com/jamiehannaford)) + + +* vSphere: + + * Fix volume detach on node failure. ([#45569](https://github.com/kubernetes/kubernetes/pull/45569), [@divyenpatel](https://github.com/divyenpatel)) + + * Report same Node IP as both internal and external. 
([#45201](https://github.com/kubernetes/kubernetes/pull/45201), [@abrarshivani](https://github.com/abrarshivani)) + + * Filter out IPV6 node addresses. ([#45181](https://github.com/kubernetes/kubernetes/pull/45181), [@BaluDontu](https://github.com/BaluDontu)) + + * Fix fetching of VM UUID on Ubuntu 16.04 and Fedora. ([#45311](https://github.com/kubernetes/kubernetes/pull/45311), [@divyenpatel](https://github.com/divyenpatel)) + + +#### Cluster Provisioning +* Juju: + + * Add Kubernetes 1.6 support to Juju charms ([#44500](https://github.com/kubernetes/kubernetes/pull/44500), [@Cynerva](https://github.com/Cynerva)) + + * Add metric collection to charms for autoscaling + + * Update kubernetes-e2e charm to fail when test suite fails + + * Update Juju charms to use snaps + + * Add registry action to the kubernetes-worker charm + + * Add support for kube-proxy cluster-cidr option to kubernetes-worker charm + + * Fix kubernetes-master charm starting services before TLS certs are saved + + * Fix kubernetes-worker charm failures in LXD + + * Fix stop hook failure on kubernetes-worker charm + + * Fix handling of juju kubernetes-worker.restart-needed state + + * Fix nagios checks in charms + + * Enable GPU mode if GPU hardware detected ([#43467](https://github.com/kubernetes/kubernetes/pull/43467), [@tvansteenburgh](https://github.com/tvansteenburgh)) + + * Fix ceph-secret type to kubernetes.io/rbd in kubernetes-master charm ([#44635](https://github.com/kubernetes/kubernetes/pull/44635), [@Cynerva](https://github.com/Cynerva)) + + * Disallows installation of upstream docker from PPA in the Juju kubernetes-worker charm. ([#44681](https://github.com/kubernetes/kubernetes/pull/44681), [@wwwtyro](https://github.com/wwwtyro)) + + * Resolves juju vsphere hostname bug showing only a single node in a scaled node-pool. 
([#44780](https://github.com/kubernetes/kubernetes/pull/44780), [@chuckbutler](https://github.com/chuckbutler)) + + * Fixes a bug in the kubernetes-worker Juju charm code that attempted to give kube-proxy more than one api endpoint. ([#44677](https://github.com/kubernetes/kubernetes/pull/44677), [@wwwtyro](https://github.com/wwwtyro)) + + * Added CIFS PV support for Juju Charms ([#45117](https://github.com/kubernetes/kubernetes/pull/45117), [@chuckbutler](https://github.com/chuckbutler)) + + * Fixes juju kubernetes master: 1. Get certs from a dead leader. 2. Append tokens. ([#43620](https://github.com/kubernetes/kubernetes/pull/43620), [@ktsakalozos](https://github.com/ktsakalozos)) + + * kubernetes-master juju charm properly detects etcd-scale events and reconfigures appropriately. ([#44967](https://github.com/kubernetes/kubernetes/pull/44967), [@chuckbutler](https://github.com/chuckbutler)) + + * Use correct option name in the kubernetes-worker layer registry action ([#44921](https://github.com/kubernetes/kubernetes/pull/44921), [@jacekn](https://github.com/jacekn)) + + * Send dns details only after cdk-addons are configured ([#44945](https://github.com/kubernetes/kubernetes/pull/44945), [@ktsakalozos](https://github.com/ktsakalozos)) + + * Added support to the pause action in the kubernetes-worker charm for new flag `--delete-local-data` ([#44931](https://github.com/kubernetes/kubernetes/pull/44931), [@chuckbutler](https://github.com/chuckbutler)) + + * Add namespace-{list, create, delete} actions to the kubernetes-master layer ([#44277](https://github.com/kubernetes/kubernetes/pull/44277), [@jacekn](https://github.com/jacekn)) + + * Using http2 in kubeapi-load-balancer to fix `kubectl exec` uses ([#43625](https://github.com/kubernetes/kubernetes/pull/43625), [@mbruzek](https://github.com/mbruzek)) + + + * Don't append :443 to registry domain in the kubernetes-worker layer registry action ([#45550](https://github.com/kubernetes/kubernetes/pull/45550), 
[@jacekn](https://github.com/jacekn)) + +* kubeadm + + * Enable the Node Authorizer/Admission plugin in v1.7 ([#46879](https://github.com/kubernetes/kubernetes/pull/46879), [@luxas](https://github.com/luxas)) + + * Users can now pass extra parameters to etcd in a kubeadm cluster ([#42246](https://github.com/kubernetes/kubernetes/pull/42246), [@jamiehannaford](https://github.com/jamiehannaford)) + + * Make kubeadm use the new CSR approver in v1.7 ([#46864](https://github.com/kubernetes/kubernetes/pull/46864), [@luxas](https://github.com/luxas)) + + * Allow enabling multiple authorization modes at the same time ([#42557](https://github.com/kubernetes/kubernetes/pull/42557), [@xilabao](https://github.com/xilabao)) + + * add proxy client-certs to kube-apiserver to allow it to proxy aggregated api servers ([#43715](https://github.com/kubernetes/kubernetes/pull/43715), [@deads2k](https://github.com/deads2k))* CentOS provider + +* hyperkube + + * The hyperkube image has been slimmed down and no longer includes addon manifests and other various scripts. These were introduced for the now removed docker-multinode setup system. ([#44555](https://github.com/kubernetes/kubernetes/pull/44555), [@luxas](https://github.com/luxas)) + +* Support secure etcd cluster for centos provider. ([#42994](https://github.com/kubernetes/kubernetes/pull/42994), [@Shawyeok](https://github.com/Shawyeok)) + +* Update to kube-addon-manager:v6.4-beta.2: kubectl v1.6.4 and refreshed base images ([#47389](https://github.com/kubernetes/kubernetes/pull/47389), [@ixdy](https://github.com/ixdy)) + +* Remove Initializers from admission-control in kubernetes-master charm for pre-1.7 ([#46987](https://github.com/kubernetes/kubernetes/pull/46987), [@Cynerva](https://github.com/Cynerva)) + +* Added state guards to the idle_status messaging in the kubernetes-master charm to make deployment faster on initial deployment. 
([#47183](https://github.com/kubernetes/kubernetes/pull/47183), [@chuckbutler](https://github.com/chuckbutler)) + +#### Cluster federation +* Features: + + * Adds annotations to all Federation objects created by kubefed. ([#42683](https://github.com/kubernetes/kubernetes/pull/42683), [@perotinus](https://github.com/perotinus)) + + * Mechanism of adding `federation domain maps` to kube-dns deployment via `--federations` flag is superseded by adding/updating `federations` key in `kube-system/kube-dns` configmap. If user is using kubefed tool to join cluster federation, adding federation domain maps to kube-dns is already taken care by `kubefed join` and does not need further action. + + * Prints out status updates when running `kubefed init` ([#41849](https://github.com/kubernetes/kubernetes/pull/41849), [@perotinus](https://github.com/perotinus)) + + * `kubefed init` now supports overriding the default etcd image name with the `--etcd-image` parameter. ([#46247](https://github.com/kubernetes/kubernetes/pull/46247), [@marun](https://github.com/marun)) + + * kubefed will now configure NodeInternalIP as the federation API server endpoint when NodeExternalIP is unavailable for federation API servers exposed as NodePort services ([#46960](https://github.com/kubernetes/kubernetes/pull/46960), [@lukaszo](https://github.com/lukaszo)) + + * Automate configuring nameserver in cluster-dns for CoreDNS provider ([#42895](https://github.com/kubernetes/kubernetes/pull/42895), [@shashidharatd](https://github.com/shashidharatd)) + + * A new controller for managing DNS records is introduced which can be optionally disabled to enable third party components to manage DNS records for federated services. ([#45034](https://github.com/kubernetes/kubernetes/pull/45034), [@shashidharatd](https://github.com/shashidharatd)) + + * Remove the `--secret-name` flag from `kubefed join`, instead generating the secret name arbitrarily. 
([#42513](https://github.com/kubernetes/kubernetes/pull/42513), [@perotinus](https://github.com/perotinus)) + + * Use StorageClassName for etcd pvc ([#46323](https://github.com/kubernetes/kubernetes/pull/46323), [@marun](https://github.com/marun)) + +* Bug fixes: + + * Allow disabling federation controllers through override args ([#44209](https://github.com/kubernetes/kubernetes/pull/44209), [@irfanurrehman](https://github.com/irfanurrehman)) + + * Kubefed: Use service accounts instead of the user's credentials when accessing joined clusters' API servers. ([#42042](https://github.com/kubernetes/kubernetes/pull/42042), [@perotinus](https://github.com/perotinus)) + + * Avoid panic if route53 fields are nil ([#44380](https://github.com/kubernetes/kubernetes/pull/44380), [@justinsb](https://github.com/justinsb)) + + +#### Credential provider +* add rancher credential provider ([#40160](https://github.com/kubernetes/kubernetes/pull/40160), [@wlan0](https://github.com/wlan0)) + +#### Information for Kubernetes clients (openapi, swagger, client-go) +* Features: + + * Add Host field to TCPSocketAction ([#42902](https://github.com/kubernetes/kubernetes/pull/42902), [@louyihua](https://github.com/louyihua)) + + * Add the ability to lock on ConfigMaps to support HA for self hosted components ([#42666](https://github.com/kubernetes/kubernetes/pull/42666), [@timothysc](https://github.com/timothysc)) + + * validateClusterInfo: use clientcmdapi.NewCluster() ([#44221](https://github.com/kubernetes/kubernetes/pull/44221), [@ncdc](https://github.com/ncdc)) + + * OpenAPI spec is now available in protobuf binary and gzip format (with ETag support) ([#45836](https://github.com/kubernetes/kubernetes/pull/45836), [@mbohlool](https://github.com/mbohlool)) + + * HostAliases is now parsed with hostAliases json keys to be in line with the feature's name. 
([#47512](https://github.com/kubernetes/kubernetes/pull/47512), [@rickypai](https://github.com/rickypai)) + + * Add redirect support to SpdyRoundTripper ([#44451](https://github.com/kubernetes/kubernetes/pull/44451), [@ncdc](https://github.com/ncdc)) + + * Duplicate recurring Events now include the latest event's Message string ([#46034](https://github.com/kubernetes/kubernetes/pull/46034), [@kensimon](https://github.com/kensimon)) + +* Bug fixes: + + * Fix serialization of EnforceNodeAllocatable ([#44606](https://github.com/kubernetes/kubernetes/pull/44606), [@ivan4th](https://github.com/ivan4th)) + + * Use OS-specific libs when computing client User-Agent in kubectl, etc. ([#44423](https://github.com/kubernetes/kubernetes/pull/44423), [@monopole](https://github.com/monopole)) + + +#### Instrumentation +* Bumped Heapster to v1.4.0. More details about the release https://github.com/kubernetes/heapster/releases/tag/v1.4.0 + +* Fluentd manifest pod is no longer created on non-registered master when creating clusters using kube-up.sh. ([#44721](https://github.com/kubernetes/kubernetes/pull/44721), [@piosz](https://github.com/piosz)) + +* Stackdriver cluster logging now deploys a new component to export Kubernetes events. ([#46700](https://github.com/kubernetes/kubernetes/pull/46700), [@crassirostris](https://github.com/crassirostris)) + +* Stackdriver Logging deployment exposes metrics on node port 31337 when enabled. 
([#47402](https://github.com/kubernetes/kubernetes/pull/47402), [@crassirostris](https://github.com/crassirostris)) + +* Upgrade Elasticsearch Addon to v5.4.0 ([#45589](https://github.com/kubernetes/kubernetes/pull/45589), [@it-svit](https://github.com/it-svit)) + +#### Internal storage layer +* prevent pods/status from touching ownerreferences ([#45826](https://github.com/kubernetes/kubernetes/pull/45826), [@deads2k](https://github.com/deads2k)) + +* Ensure that autoscaling/v1 is the preferred version for API discovery when autoscaling/v2alpha1 is enabled. ([#45741](https://github.com/kubernetes/kubernetes/pull/45741), [@DirectXMan12](https://github.com/DirectXMan12)) + +* The proxy subresource APIs for nodes, services, and pods now support the HTTP PATCH method. ([#44929](https://github.com/kubernetes/kubernetes/pull/44929), [@liggitt](https://github.com/liggitt)) + +* Fluentd now tolerates all NoExecute Taints when run in gcp configuration. ([#45715](https://github.com/kubernetes/kubernetes/pull/45715), [@gmarek](https://github.com/gmarek)) + + +#### Kubernetes Dashboard + +* Increase Dashboard's memory requests and limits ([#44712](https://github.com/kubernetes/kubernetes/pull/44712), [@maciaszczykm](https://github.com/maciaszczykm)) + +* Update Dashboard version to 1.6.1 ([#45953](https://github.com/kubernetes/kubernetes/pull/45953), [@maciaszczykm](https://github.com/maciaszczykm)) + + +#### kube-dns +* Updates kube-dns to 1.14.2 ([#45684](https://github.com/kubernetes/kubernetes/pull/45684), [@bowei](https://github.com/bowei)) + + * Support kube-master-url flag without kubeconfig + + * Fix concurrent R/Ws in dns.go + + * Fix confusing logging when initializing server + + * Fix printf in cmd/kube-dns/app/server.go + + * Fix version on startup and `--version` flag + + * Support specifying port number for nameserver in stubDomains + +#### kube-proxy +* Features: + + * ratelimit runs of iptables by sync-period flags 
([#46266](https://github.com/kubernetes/kubernetes/pull/46266), [@thockin](https://github.com/thockin)) + + * Log warning when invalid dir passed to `kubectl proxy --www` ([#44952](https://github.com/kubernetes/kubernetes/pull/44952), [@CaoShuFeng](https://github.com/CaoShuFeng)) + + * Add `--write-config-to` flag to kube-proxy to allow users to write the default configuration settings to a file. ([#45908](https://github.com/kubernetes/kubernetes/pull/45908), [@ncdc](https://github.com/ncdc)) + + * When switching from the service.beta.kubernetes.io/external-traffic annotation to the new ([#46716](https://github.com/kubernetes/kubernetes/pull/46716), [@thockin](https://github.com/thockin)) externalTrafficPolicy field, the values change as follows: * "OnlyLocal" becomes "Local" * "Global" becomes "Cluster". + + +* Bug fixes: + + * Fix corner-case with OnlyLocal Service healthchecks. ([#44313](https://github.com/kubernetes/kubernetes/pull/44313), [@thockin](https://github.com/thockin)) + + * Fix DNS suffix search list support in Windows kube-proxy. ([#45642](https://github.com/kubernetes/kubernetes/pull/45642), [@JiangtianLi](https://github.com/JiangtianLi)) + +#### kube-scheduler +* Scheduler can receive its policy configuration from a ConfigMap ([#43892](https://github.com/kubernetes/kubernetes/pull/43892), [@bsalamat](https://github.com/bsalamat)) + +* Aggregated used ports at the NodeInfo level for PodFitsHostPorts predicate. ([#42524](https://github.com/kubernetes/kubernetes/pull/42524), [@k82cn](https://github.com/k82cn)) + +* leader election lock based on scheduler name ([#42961](https://github.com/kubernetes/kubernetes/pull/42961), [@wanghaoran1988](https://github.com/wanghaoran1988)) + + * Fix DNS suffix search list support in Windows kube-proxy. 
([#45642](https://github.com/kubernetes/kubernetes/pull/45642), [@JiangtianLi](https://github.com/JiangtianLi)) + +#### Storage + +* Features + + * The options passed to a Flexvolume plugin's mount command now contains the pod name (kubernetes.io/pod.name), namespace (kubernetes.io/pod.namespace), uid (kubernetes.io/pod.uid), and service account name (kubernetes.io/serviceAccount.name). ([#39488](https://github.com/kubernetes/kubernetes/pull/39488), [@liggitt](https://github.com/liggitt)) + + * GCE and AWS dynamic provisioners extension: admins can configure zone(s) in which a persistent volume shall be created. ([#38505](https://github.com/kubernetes/kubernetes/pull/38505), [@pospispa](https://github.com/pospispa)) + + * Implement API usage metrics for GCE storage. ([#40338](https://github.com/kubernetes/kubernetes/pull/40338), [@gnufied](https://github.com/gnufied)) + + * Add support for emitting metrics from openstack cloudprovider about storage operations. ([#46008](https://github.com/kubernetes/kubernetes/pull/46008), [@NickrenREN](https://github.com/NickrenREN)) + + * vSphere cloud provider: vSphere storage policy support for dynamic volume provisioning. 
([#46176](https://github.com/kubernetes/kubernetes/pull/46176), [@BaluDontu](https://github.com/BaluDontu)) + + * Support StorageClass in Azure file volume ([#42170](https://github.com/kubernetes/kubernetes/pull/42170), [@rootfs](https://github.com/rootfs)) + + * Start recording cloud provider metrics for AWS ([#43477](https://github.com/kubernetes/kubernetes/pull/43477), [@gnufied](https://github.com/gnufied)) + + * Support iSCSI CHAP authentication ([#43396](https://github.com/kubernetes/kubernetes/pull/43396), [@rootfs](https://github.com/rootfs)) + + * Openstack cinder v1/v2/auto API support ([#40423](https://github.com/kubernetes/kubernetes/pull/40423), [@mkutsevol](https://github.com/mkutsevol)) ([#41498](https://github.com/kubernetes/kubernetes/pull/41498), [@mikebryant](https://github.com/mikebryant)) + + * Alpha feature: allows users to set storage limit to isolate EmptyDir volumes. It enforces the limit by evicting pods that exceed their storage limits ([#45686](https://github.com/kubernetes/kubernetes/pull/45686), [@jingxu97](https://github.com/jingxu97)) + +* Bug fixes + + * Fixes issue with Flexvolume, introduced in 1.6.0, where drivers without an attacher would fail (node indefinitely waiting for attach). A driver API addition is introduced: drivers that don't implement attach should return attach: false on init. ([#47503](https://github.com/kubernetes/kubernetes/pull/47503), [@chakri-nelluri](https://github.com/chakri-nelluri)) + + * Fix dynamic provisioning of PVs with inaccurate AccessModes by refusing to provision when PVCs ask for AccessModes that can't be satisfied by the PVs' underlying volume plugin. ([#47274](https://github.com/kubernetes/kubernetes/pull/47274), [@wongma7](https://github.com/wongma7)) + + * Fix pods failing to start if they specify a file as a volume subPath to mount. 
([#45623](https://github.com/kubernetes/kubernetes/pull/45623), [@wongma7](https://github.com/wongma7)) + + * Fix erroneous FailedSync and FailedMount events being periodically and indefinitely posted on Pods after kubelet is restarted. ([#44781](https://github.com/kubernetes/kubernetes/pull/44781), [@wongma7](https://github.com/wongma7)) + + * Fix AWS EBS volumes not getting detached from node if routine to verify volumes are attached runs while the node is down ([#46463](https://github.com/kubernetes/kubernetes/pull/46463), [@wongma7](https://github.com/wongma7)) + + * Improves performance of Cinder volume attach/detach operations. ([#41785](https://github.com/kubernetes/kubernetes/pull/41785), [@jamiehannaford](https://github.com/jamiehannaford)) + + * Fix iSCSI iSER mounting. ([#47281](https://github.com/kubernetes/kubernetes/pull/47281), [@mtanino](https://github.com/mtanino)) + + * iscsi storage plugin: Fix dangling session when using multiple target portal addresses. ([#46239](https://github.com/kubernetes/kubernetes/pull/46239), [@mtanino](https://github.com/mtanino)) + + + * Fix log spam due to unnecessary status update when node is deleted. ([#45923](https://github.com/kubernetes/kubernetes/pull/45923), [@verult](https://github.com/verult)) + + * Don't try to attach volume to new node if it is already attached to another node and the volume does not support multi-attach. ([#45346](https://github.com/kubernetes/kubernetes/pull/45346), [@codablock](https://github.com/codablock)) + + * detach the volume when pod is terminated ([#45286](https://github.com/kubernetes/kubernetes/pull/45286), [@gnufied](https://github.com/gnufied)) + + * Roll up volume error messages in the kubelet sync loop. 
([#44938](https://github.com/kubernetes/kubernetes/pull/44938), [@jayunit100](https://github.com/jayunit100)) + + * Catch error when failed to make directory in NFS volume plugin ([#38801](https://github.com/kubernetes/kubernetes/pull/38801), [@nak3](https://github.com/nak3)) + + + +#### Networking + +* DNS and name resolution + + * Updates kube-dns to 1.14.2 ([#45684](https://github.com/kubernetes/kubernetes/pull/45684), [@bowei](https://github.com/bowei)) + + * Support kube-master-url flag without kubeconfig + + * Fix concurrent R/Ws in dns.go + + * Fix confusing logging when initializing server + + * Support specifying port number for nameserver in stubDomains + + * A new field hostAliases has been added to pod.spec to support adding entries to a Pod's /etc/hosts file. ([#44641](https://github.com/kubernetes/kubernetes/pull/44641), [@rickypai](https://github.com/rickypai)) + + * Fix DNS suffix search list support in Windows kube-proxy. ([#45642](https://github.com/kubernetes/kubernetes/pull/45642), [@JiangtianLi](https://github.com/JiangtianLi)) + +* Kube-proxy + + * ratelimit runs of iptables by sync-period flags ([#46266](https://github.com/kubernetes/kubernetes/pull/46266), [@thockin](https://github.com/thockin)) + + * Fix corner-case with OnlyLocal Service healthchecks. ([#44313](https://github.com/kubernetes/kubernetes/pull/44313), [@thockin](https://github.com/thockin)) + +* Exclude nodes labeled as master from LoadBalancer / NodePort; restores documented behaviour. ([#44745](https://github.com/kubernetes/kubernetes/pull/44745), [@justinsb](https://github.com/justinsb)) + +* Adds support for CNI ConfigLists, which permit plugin chaining. 
([#42202](https://github.com/kubernetes/kubernetes/pull/42202), [@squeed](https://github.com/squeed)) + +* Fix node selection logic on initial LB creation ([#45773](https://github.com/kubernetes/kubernetes/pull/45773), [@justinsb](https://github.com/justinsb)) + +* When switching from the service.beta.kubernetes.io/external-traffic annotation to the new externalTrafficPolicy field, the values change as follows: * "OnlyLocal" becomes "Local" * "Global" becomes "Cluster". ([#46716](https://github.com/kubernetes/kubernetes/pull/46716), [@thockin](https://github.com/thockin)) + +* servicecontroller: Fix node selection logic on initial LB creation ([#45773](https://github.com/kubernetes/kubernetes/pull/45773), [@justinsb](https://github.com/justinsb)) + +* fixed HostAlias in PodSpec to allow foo.bar hostnames instead of just foo DNS labels. ([#46809](https://github.com/kubernetes/kubernetes/pull/46809), [@rickypai](https://github.com/rickypai)) + + +#### Node controller +* Bug fixes: + + * Fix [transition between NotReady and Unreachable taints](https://github.com/kubernetes/kubernetes/issues/43444). ([#44042](https://github.com/kubernetes/kubernetes/pull/44042), [@gmarek](https://github.com/gmarek)) + + +#### Node Components + +* Features + + * Removes the deprecated kubelet flag `--babysit-daemons` ([#44230](https://github.com/kubernetes/kubernetes/pull/44230), [@mtaufen](https://github.com/mtaufen)) + + * make dockershim.sock configurable ([#43914](https://github.com/kubernetes/kubernetes/pull/43914), [@ncdc](https://github.com/ncdc)) + + * Support running Ubuntu image on GCE node ([#44744](https://github.com/kubernetes/kubernetes/pull/44744), [@yguo0905](https://github.com/yguo0905)) + + * Kubernetes now shares a single PID namespace among all containers in a pod when running with docker >= 1.13.1. 
This means processes can now signal processes in other containers in a pod, but it also means that the `kubectl exec {pod} kill 1` pattern will cause the Pod to be restarted rather than a single container. ([#45236](https://github.com/kubernetes/kubernetes/pull/45236), [@verb](https://github.com/verb)) + + * A new field hostAliases has been added to the pod spec to support [adding entries to a Pod's /etc/hosts file](https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/). ([#44641](https://github.com/kubernetes/kubernetes/pull/44641), [@rickypai](https://github.com/rickypai)) + + * With `--feature-gates=RotateKubeletClientCertificate=true` set, the Kubelet will ([#41912](https://github.com/kubernetes/kubernetes/pull/41912), [@jcbsmpsn](https://github.com/jcbsmpsn)) + + * request a client certificate from the API server during the boot cycle and pause + + * waiting for the request to be satisfied. It will continually refresh the certificate + + * Create clusters with GPUs in GCE by specifying `type=,count=` to NODE_ACCELERATORS environment variable. ([#45130](https://github.com/kubernetes/kubernetes/pull/45130), [@vishh](https://github.com/vishh)) + + * List of available GPUs - [https://cloud.google.com/compute/docs/gpus/#introduction](https://cloud.google.com/compute/docs/gpus/#introduction) + + * Disk Pressure triggers the deletion of terminated containers on the node. ([#45896](https://github.com/kubernetes/kubernetes/pull/45896), [@dashpole](https://github.com/dashpole)) + + * Support status.hostIP in downward API ([#42717](https://github.com/kubernetes/kubernetes/pull/42717), [@andrewsykim](https://github.com/andrewsykim)) + + * Upgrade Node Problem Detector to v0.4.1. New features added: + + * Add /dev/kmsg support for kernel log parsing. ([#112](https://github.com/kubernetes/node-problem-detector/pull/112), [@euank](https://github.com/euank)) + + * Add ABRT support. 
([#105](https://github.com/kubernetes/node-problem-detector/pull/105), [@juliusmilan](https://github.com/juliusmilan)) + + * Add a docker image corruption problem detection in the default docker monitor config. ([#117](https://github.com/kubernetes/node-problem-detector/pull/117), [@ajitak](https://github.com/ajitak)) + + * Upgrade CAdvisor to v0.26.1. New features added: + + * Add Docker overlay2 storage driver support. + + * Add ZFS support. + + * Add UDP metrics (collection disabled by default). + + * Roll up volume error messages in the kubelet sync loop. ([#44938](https://github.com/kubernetes/kubernetes/pull/44938), [@jayunit100](https://github.com/jayunit100)) + + * Allow pods to opt out of PodPreset mutation via an annotation on the pod. ([#44965](https://github.com/kubernetes/kubernetes/pull/44965), [@jpeeler](https://github.com/jpeeler)) + + * Add generic Toleration for NoExecute Taints to NodeProblemDetector, so that NPD can be scheduled to nodes with NoExecute taints by default. ([#45883](https://github.com/kubernetes/kubernetes/pull/45883), [@gmarek](https://github.com/gmarek)) + + * Prevent kubelet from setting allocatable < 0 for a resource upon initial creation. ([#46516](https://github.com/kubernetes/kubernetes/pull/46516), [@derekwaynecarr](https://github.com/derekwaynecarr)) + +* Bug fixes + + * Changed Kubelet default image-gc-high-threshold to 85% to resolve a conflict with default settings in docker that prevented image garbage collection from resolving low disk space situations when using devicemapper storage. 
([#40432](https://github.com/kubernetes/kubernetes/pull/40432), [@sjenning](https://github.com/sjenning)) + + * Mark all static pods on the Master node as critical to prevent preemption ([#47356](https://github.com/kubernetes/kubernetes/pull/47356), [@dashpole](https://github.com/dashpole)) + + * Restrict active deadline seconds max allowed value to be maximum uint32 to avoid overflow ([#46640](https://github.com/kubernetes/kubernetes/pull/46640), [@derekwaynecarr](https://github.com/derekwaynecarr)) + + * Fix a bug with cAdvisorPort in the KubeletConfiguration that prevented setting it to 0, which is in fact a valid option, as noted in issue [#11710](https://github.com/kubernetes/kubernetes/pull/11710). ([#46876](https://github.com/kubernetes/kubernetes/pull/46876), [@mtaufen](https://github.com/mtaufen)) + + * Fix a bug where container cannot run as root when SecurityContext.RunAsNonRoot is false. ([#47009](https://github.com/kubernetes/kubernetes/pull/47009), [@yujuhong](https://github.com/yujuhong)) + + * Fix the Kubelet PLEG update timestamp to better reflect the health of the component when the container runtime request hangs. ([#45496](https://github.com/kubernetes/kubernetes/pull/45496), [@andyxning](https://github.com/andyxning)) + + * Avoid failing sync loop health check on container runtime errors ([#47124](https://github.com/kubernetes/kubernetes/pull/47124), [@andyxning](https://github.com/andyxning)) + + * Fix a bug where Kubelet does not ignore pod manifest files starting with dots ([#45111](https://github.com/kubernetes/kubernetes/pull/45111), [@dwradcliffe](https://github.com/dwradcliffe)) + + * Fix kubelet reset liveness probe failure count across pod restart boundaries ([#46371](https://github.com/kubernetes/kubernetes/pull/46371), [@sjenning](https://github.com/sjenning)) + + * Fix log spam due to unnecessary status update when node is deleted. 
([#45923](https://github.com/kubernetes/kubernetes/pull/45923), [@verult](https://github.com/verult)) + + * Fix kubelet event recording for selected events. ([#46246](https://github.com/kubernetes/kubernetes/pull/46246), [@derekwaynecarr](https://github.com/derekwaynecarr)) + + * Fix image garbage collector attempting to remove in-use images. ([#46121](https://github.com/kubernetes/kubernetes/pull/46121), [@Random-Liu](https://github.com/Random-Liu)) + + * Detach the volume when pod is terminated ([#45286](https://github.com/kubernetes/kubernetes/pull/45286), [@gnufied](https://github.com/gnufied)) + + * CRI: Fix StopContainer timeout ([#44970](https://github.com/kubernetes/kubernetes/pull/44970), [@Random-Liu](https://github.com/Random-Liu)) + + * CRI: Fix kubelet failing to start when using rkt. ([#44569](https://github.com/kubernetes/kubernetes/pull/44569), [@yujuhong](https://github.com/yujuhong)) + + * CRI: `kubectl logs -f` now stops following when container stops, as it did pre-CRI. ([#44406](https://github.com/kubernetes/kubernetes/pull/44406), [@Random-Liu](https://github.com/Random-Liu)) + + * Fixes a bug where pods were evicted even after images are successfully deleted. ([#44986](https://github.com/kubernetes/kubernetes/pull/44986), [@dashpole](https://github.com/dashpole)) + + * When creating a container using envFrom, ([#42083](https://github.com/kubernetes/kubernetes/pull/42083), [@fraenkel](https://github.com/fraenkel) + * validate the name of the ConfigMap in a ConfigMapRef + * validate the name of the Secret in a SecretRef + + * Fix the bug where StartedAt time is not reported for exited containers. 
([#45977](https://github.com/kubernetes/kubernetes/pull/45977), [@yujuhong](https://github.com/yujuhong)) + +* Changes/deprecations + + * Marks the Kubelet's `--master-service-namespace` flag deprecated ([#44250](https://github.com/kubernetes/kubernetes/pull/44250), [@mtaufen](https://github.com/mtaufen)) + + * Remove PodSandboxStatus.Linux.Namespaces.Network from CRI since it is not used/needed. ([#45166](https://github.com/kubernetes/kubernetes/pull/45166), [@feiskyer](https://github.com/feiskyer)) + + * Remove the `--enable-cri` flag. CRI is now the default, and the only way to integrate with Kubelet for the container runtimes.([#45194](https://github.com/kubernetes/kubernetes/pull/45194), [@yujuhong](https://github.com/yujuhong)) + + * CRI has been moved to package pkg/kubelet/apis/cri/v1alpha1/runtime as part of Kubelet API path cleanup. ([#47113](https://github.com/kubernetes/kubernetes/pull/47113), [@feiskyer](https://github.com/feiskyer)) + + +#### Scheduling + +* The fix makes scheduling go routine waiting for cache (e.g. Pod) to be synced. ([#45453](https://github.com/kubernetes/kubernetes/pull/45453), [@k82cn](https://github.com/k82cn)) + +* Move hardPodAffinitySymmetricWeight to scheduler policy config ([#44159](https://github.com/kubernetes/kubernetes/pull/44159), [@wanghaoran1988](https://github.com/wanghaoran1988)) + +* Align Extender's validation with prioritizers. ([#45091](https://github.com/kubernetes/kubernetes/pull/45091), [@k82cn](https://github.com/k82cn)) + +* Removed old scheduler constructor. ([#45472](https://github.com/kubernetes/kubernetes/pull/45472), [@k82cn](https://github.com/k82cn)) + +* Fixes the overflow for priorityconfig- valid range {1, 9223372036854775806}. 
([#45122](https://github.com/kubernetes/kubernetes/pull/45122), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) + +* Move hardPodAffinitySymmetricWeight to scheduler policy config ([#44159](https://github.com/kubernetes/kubernetes/pull/44159), [@wanghaoran1988](https://github.com/wanghaoran1988)) + + +#### Security +* Features: + + * Permission to use a PodSecurityPolicy can now be granted within a single namespace by allowing the use verb on the podsecuritypolicies resource within the namespace. ([#42360](https://github.com/kubernetes/kubernetes/pull/42360), [@liggitt](https://github.com/liggitt)) + + * Break the 'certificatesigningrequests' controller into a 'csrapprover' controller and 'csrsigner' controller. ([#45514](https://github.com/kubernetes/kubernetes/pull/45514), [@mikedanese](https://github.com/mikedanese)) + + * `kubectl auth can-i` now supports non-resource URLs ([#46432](https://github.com/kubernetes/kubernetes/pull/46432), [@CaoShuFeng](https://github.com/CaoShuFeng)) + + * Promote kubelet tls bootstrap to beta. Add a non-experimental flag to use it and deprecate the old flag. ([#46799](https://github.com/kubernetes/kubernetes/pull/46799), [@mikedanese](https://github.com/mikedanese)) + + * Add the alpha.image-policy.k8s.io/failed-open=true annotation when the image policy webhook encounters an error and fails open. ([#46264](https://github.com/kubernetes/kubernetes/pull/46264), [@Q-Lee](https://github.com/Q-Lee)) + + * Add an AEAD encrypting transformer for storing secrets encrypted at rest ([#41939](https://github.com/kubernetes/kubernetes/pull/41939), [@smarterclayton](https://github.com/smarterclayton)) + + * Add secretbox and AES-CBC encryption modes to at rest encryption. AES-CBC is considered superior to AES-GCM because it is resistant to nonce-reuse attacks, and secretbox uses Poly1305 and XSalsa20. 
([#46916](https://github.com/kubernetes/kubernetes/pull/46916), [@smarterclayton](https://github.com/smarterclayton)) + +* Bug fixes: + + * Make gcp auth provider not override the Auth header if it already exists ([#45575](https://github.com/kubernetes/kubernetes/pull/45575), [@wanghaoran1988](https://github.com/wanghaoran1988)) + + * The oidc client plugin has reduced round trips and fixed the scopes requested ([#45317](https://github.com/kubernetes/kubernetes/pull/45317), [@ericchiang](https://github.com/ericchiang)) + + * API requests using impersonation now include the system:authenticated group in the impersonated user automatically. ([#44076](https://github.com/kubernetes/kubernetes/pull/44076), [@liggitt](https://github.com/liggitt)) + + * RBAC role and rolebinding auto-reconciliation is now performed only when the RBAC authorization mode is enabled. ([#43813](https://github.com/kubernetes/kubernetes/pull/43813), [@liggitt](https://github.com/liggitt)) + + * PodSecurityPolicy now recognizes pods that specify runAsNonRoot: false in their security context and does not overwrite the specified value ([#47073](https://github.com/kubernetes/kubernetes/pull/47073), [@Q-Lee](https://github.com/Q-Lee)) + + * Tokens retrieved from Google Cloud with application default credentials will not be cached if the client fails authorization ([#46694](https://github.com/kubernetes/kubernetes/pull/46694), [@matt-tyler](https://github.com/matt-tyler)) + + * Update kube-dns, metadata-proxy, and fluentd-gcp, event-exporter, prometheus-to-sd, and ip-masq-agent addons with new base images containing fixes for CVE-2016-4448, CVE-2016-9841, CVE-2016-9843, CVE-2017-1000366, CVE-2017-2616, and CVE-2017-9526. ([#47877](https://github.com/kubernetes/kubernetes/pull/47877), [@ixdy](https://github.com/ixdy)) + + * Fixed an issue mounting the wrong secret into pods as a service account token. 
([#44102](https://github.com/kubernetes/kubernetes/pull/44102), [@ncdc](https://github.com/ncdc)) + +#### Scalability + +* The HorizontalPodAutoscaler controller will now only send updates when it has new status information, reducing the number of writes caused by the controller. ([#47078](https://github.com/kubernetes/kubernetes/pull/47078), [@DirectXMan12](https://github.com/DirectXMan12)) + + +## **External Dependency Version Information** + +Continuous integration builds have used the following versions of external dependencies, however, this is not a strong recommendation and users should consult an appropriate installation or upgrade guide before deciding what versions of etcd, docker or rkt to use. + +* Docker versions 1.10.3, 1.11.2, 1.12.6 have been validated + + * Docker version 1.12.6 known issues + + * overlay2 driver not fully supported + + * live-restore not fully supported + + * no shared pid namespace support + + * Docker version 1.11.2 known issues + + * Kernel crash with Aufs storage driver on Debian Jessie ([#27885](https://github.com/kubernetes/kubernetes/pull/27885)) which can be identified by the [node problem detector](https://kubernetes.io/docs/tasks/debug-application-cluster/monitor-node-health/) + + * Leaked File descriptors ([#275](https://github.com/docker/containerd/issues/275)) + + * Additional memory overhead per container ([#21737](https://github.com/kubernetes/kubernetes/pull/21737)) + + * Docker 1.10.3 contains [backports provided by RedHat](https://github.com/docker/docker/compare/v1.10.3...runcom:docker-1.10.3-stable) for known issues + +* For issues with Docker 1.13.X please see the [1.13.X tracking issue](https://github.com/kubernetes/kubernetes/issues/42926) + +* rkt version 1.23.0+ + + * known issues with the rkt runtime are [listed in the Getting Started Guide](https://kubernetes.io/docs/getting-started-guides/rkt/notes/) + +* etcd version 3.0.17 + +* Go version: 1.8.3. 
[Link to announcement](https://groups.google.com/d/msg/kubernetes-dev/0XRRz6UhhTM/YODWVnuDBQAJ) + + * Kubernetes can only be compiled with Go 1.8. Support for all other versions is dropped. + + +### Previous Releases Included in v1.7.0 +- [v1.7.0-rc.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md#v170-rc1) +- [v1.7.0-beta.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md#v170-beta2) +- [v1.7.0-beta.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md#v170-beta1) +- [v1.7.0-alpha.4](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md#v170-alpha4) +- [v1.7.0-alpha.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md#v170-alpha3) +- [v1.7.0-alpha.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md#v170-alpha2) +- [v1.7.0-alpha.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md#v170-alpha1) + + + # v1.7.0-rc.1 [Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.7/examples) @@ -785,37 +2296,41 @@ filename | sha256 hash * Support updating storageclasses in etcd to storage.k8s.io/v1. You must do this prior to upgrading to 1.8. ([#46116](https://github.com/kubernetes/kubernetes/pull/46116), [@ncdc](https://github.com/ncdc)) * The namespace API object no longer supports the deletecollection operation. ([#46407](https://github.com/kubernetes/kubernetes/pull/46407), [@liggitt](https://github.com/liggitt)) * NetworkPolicy has been moved from `extensions/v1beta1` to the new ([#39164](https://github.com/kubernetes/kubernetes/pull/39164), [@danwinship](https://github.com/danwinship)) - * `networking.k8s.io/v1` API group. The structure remains unchanged from - * the beta1 API. - * The `net.beta.kubernetes.io/network-policy` annotation on Namespaces - * to opt in to isolation has been removed. 
Instead, isolation is now - * determined at a per-pod level, with pods being isolated if there is - * any NetworkPolicy whose spec.podSelector targets them. Pods that are - * targeted by NetworkPolicies accept traffic that is accepted by any of - * the NetworkPolicies (and nothing else), and pods that are not targeted - * by any NetworkPolicy accept all traffic by default. - * Action Required: - * When upgrading to Kubernetes 1.7 (and a network plugin that supports - * the new NetworkPolicy v1 semantics), to ensure full behavioral - * compatibility with v1beta1: - * 1. In Namespaces that previously had the "DefaultDeny" annotation, - * you can create equivalent v1 semantics by creating a - * NetworkPolicy that matches all pods but does not allow any - * traffic: - * kind: NetworkPolicy - * apiVersion: networking.k8s.io/v1 - * metadata: - * name: default-deny - * spec: - * podSelector: - * This will ensure that pods that aren't matched by any other - * NetworkPolicy will continue to be fully-isolated, as they were - * before. - * 2. In Namespaces that previously did not have the "DefaultDeny" - * annotation, you should delete any existing NetworkPolicy - * objects. These would have had no effect before, but with v1 - * semantics they might cause some traffic to be blocked that you - * didn't intend to be blocked. + `networking.k8s.io/v1` API group. The structure remains unchanged from + the beta1 API. + The `net.beta.kubernetes.io/network-policy` annotation on Namespaces + to opt in to isolation has been removed. Instead, isolation is now + determined at a per-pod level, with pods being isolated if there is + any NetworkPolicy whose spec.podSelector targets them. Pods that are + targeted by NetworkPolicies accept traffic that is accepted by any of + the NetworkPolicies (and nothing else), and pods that are not targeted + by any NetworkPolicy accept all traffic by default. 
+ Action Required: + When upgrading to Kubernetes 1.7 (and a network plugin that supports + the new NetworkPolicy v1 semantics), to ensure full behavioral + compatibility with v1beta1: + 1. In Namespaces that previously had the "DefaultDeny" annotation, + you can create equivalent v1 semantics by creating a + NetworkPolicy that matches all pods but does not allow any + traffic: + + ```yaml + kind: NetworkPolicy + apiVersion: networking.k8s.io/v1 + metadata: + name: default-deny + spec: + podSelector: + ``` + + This will ensure that pods that aren't matched by any other + NetworkPolicy will continue to be fully-isolated, as they were + before. + 2. In Namespaces that previously did not have the "DefaultDeny" + annotation, you should delete any existing NetworkPolicy + objects. These would have had no effect before, but with v1 + semantics they might cause some traffic to be blocked that you + didn't intend to be blocked. ### Other notable changes @@ -937,7 +2452,6 @@ filename | sha256 hash * The Prometheus metrics for the kube-apiserver for tracking incoming API requests and latencies now return the `subresource` label for correctly attributing the type of API call. ([#46354](https://github.com/kubernetes/kubernetes/pull/46354), [@smarterclayton](https://github.com/smarterclayton)) * Add Simplified Chinese translation for kubectl ([#45573](https://github.com/kubernetes/kubernetes/pull/45573), [@shiywang](https://github.com/shiywang)) * The --namespace flag is now honored for in-cluster clients that have an empty configuration. ([#46299](https://github.com/kubernetes/kubernetes/pull/46299), [@ncdc](https://github.com/ncdc)) -* NONE ([#45317](https://github.com/kubernetes/kubernetes/pull/45317), [@ericchiang](https://github.com/ericchiang)) * Fix init container status reporting when active deadline is exceeded. 
([#46305](https://github.com/kubernetes/kubernetes/pull/46305), [@sjenning](https://github.com/sjenning)) * Improves performance of Cinder volume attach/detach operations ([#41785](https://github.com/kubernetes/kubernetes/pull/41785), [@jamiehannaford](https://github.com/jamiehannaford)) * GCE and AWS dynamic provisioners extension: admins can configure zone(s) in which a persistent volume shall be created. ([#38505](https://github.com/kubernetes/kubernetes/pull/38505), [@pospispa](https://github.com/pospispa)) @@ -1279,37 +2793,41 @@ filename | sha256 hash * Support updating storageclasses in etcd to storage.k8s.io/v1. You must do this prior to upgrading to 1.8. ([#46116](https://github.com/kubernetes/kubernetes/pull/46116), [@ncdc](https://github.com/ncdc)) * The namespace API object no longer supports the deletecollection operation. ([#46407](https://github.com/kubernetes/kubernetes/pull/46407), [@liggitt](https://github.com/liggitt)) * NetworkPolicy has been moved from `extensions/v1beta1` to the new ([#39164](https://github.com/kubernetes/kubernetes/pull/39164), [@danwinship](https://github.com/danwinship)) - * `networking.k8s.io/v1` API group. The structure remains unchanged from - * the beta1 API. - * The `net.beta.kubernetes.io/network-policy` annotation on Namespaces - * to opt in to isolation has been removed. Instead, isolation is now - * determined at a per-pod level, with pods being isolated if there is - * any NetworkPolicy whose spec.podSelector targets them. Pods that are - * targeted by NetworkPolicies accept traffic that is accepted by any of - * the NetworkPolicies (and nothing else), and pods that are not targeted - * by any NetworkPolicy accept all traffic by default. - * Action Required: - * When upgrading to Kubernetes 1.7 (and a network plugin that supports - * the new NetworkPolicy v1 semantics), to ensure full behavioral - * compatibility with v1beta1: - * 1. 
In Namespaces that previously had the "DefaultDeny" annotation, - * you can create equivalent v1 semantics by creating a - * NetworkPolicy that matches all pods but does not allow any - * traffic: - * kind: NetworkPolicy - * apiVersion: networking.k8s.io/v1 - * metadata: - * name: default-deny - * spec: - * podSelector: - * This will ensure that pods that aren't matched by any other - * NetworkPolicy will continue to be fully-isolated, as they were - * before. - * 2. In Namespaces that previously did not have the "DefaultDeny" - * annotation, you should delete any existing NetworkPolicy - * objects. These would have had no effect before, but with v1 - * semantics they might cause some traffic to be blocked that you - * didn't intend to be blocked. + `networking.k8s.io/v1` API group. The structure remains unchanged from + the beta1 API. + The `net.beta.kubernetes.io/network-policy` annotation on Namespaces + to opt in to isolation has been removed. Instead, isolation is now + determined at a per-pod level, with pods being isolated if there is + any NetworkPolicy whose spec.podSelector targets them. Pods that are + targeted by NetworkPolicies accept traffic that is accepted by any of + the NetworkPolicies (and nothing else), and pods that are not targeted + by any NetworkPolicy accept all traffic by default. + Action Required: + When upgrading to Kubernetes 1.7 (and a network plugin that supports + the new NetworkPolicy v1 semantics), to ensure full behavioral + compatibility with v1beta1: + 1. In Namespaces that previously had the "DefaultDeny" annotation, + you can create equivalent v1 semantics by creating a + NetworkPolicy that matches all pods but does not allow any + traffic: + + ```yaml + kind: NetworkPolicy + apiVersion: networking.k8s.io/v1 + metadata: + name: default-deny + spec: + podSelector: + ``` + + This will ensure that pods that aren't matched by any other + NetworkPolicy will continue to be fully-isolated, as they were + before. + 2. 
In Namespaces that previously did not have the "DefaultDeny" + annotation, you should delete any existing NetworkPolicy + objects. These would have had no effect before, but with v1 + semantics they might cause some traffic to be blocked that you + didn't intend to be blocked. ### Other notable changes @@ -1395,7 +2913,6 @@ filename | sha256 hash * The Prometheus metrics for the kube-apiserver for tracking incoming API requests and latencies now return the `subresource` label for correctly attributing the type of API call. ([#46354](https://github.com/kubernetes/kubernetes/pull/46354), [@smarterclayton](https://github.com/smarterclayton)) * Add Simplified Chinese translation for kubectl ([#45573](https://github.com/kubernetes/kubernetes/pull/45573), [@shiywang](https://github.com/shiywang)) * The --namespace flag is now honored for in-cluster clients that have an empty configuration. ([#46299](https://github.com/kubernetes/kubernetes/pull/46299), [@ncdc](https://github.com/ncdc)) -* NONE ([#45317](https://github.com/kubernetes/kubernetes/pull/45317), [@ericchiang](https://github.com/ericchiang)) * Fix init container status reporting when active deadline is exceeded. ([#46305](https://github.com/kubernetes/kubernetes/pull/46305), [@sjenning](https://github.com/sjenning)) * Improves performance of Cinder volume attach/detach operations ([#41785](https://github.com/kubernetes/kubernetes/pull/41785), [@jamiehannaford](https://github.com/jamiehannaford)) * GCE and AWS dynamic provisioners extension: admins can configure zone(s) in which a persistent volume shall be created. ([#38505](https://github.com/kubernetes/kubernetes/pull/38505), [@pospispa](https://github.com/pospispa)) @@ -1824,7 +3341,6 @@ filename | sha256 hash * This adds support for CNI ConfigLists, which permit plugin chaining. 
([#42202](https://github.com/kubernetes/kubernetes/pull/42202), [@squeed](https://github.com/squeed)) * API requests using impersonation now include the `system:authenticated` group in the impersonated user automatically. ([#44076](https://github.com/kubernetes/kubernetes/pull/44076), [@liggitt](https://github.com/liggitt)) * Print conditions of RC/RS in 'kubectl describe' command. ([#44710](https://github.com/kubernetes/kubernetes/pull/44710), [@xiangpengzhao](https://github.com/xiangpengzhao)) -* `NONE` ([#44487](https://github.com/kubernetes/kubernetes/pull/44487), [@resouer](https://github.com/resouer)) * cinder: Add support for the KVM virtio-scsi driver ([#41498](https://github.com/kubernetes/kubernetes/pull/41498), [@mikebryant](https://github.com/mikebryant)) * Disallows installation of upstream docker from PPA in the Juju kubernetes-worker charm. ([#44681](https://github.com/kubernetes/kubernetes/pull/44681), [@wwwtyro](https://github.com/wwwtyro)) * Fluentd manifest pod is no longer created on non-registered master when creating clusters using kube-up.sh. ([#44721](https://github.com/kubernetes/kubernetes/pull/44721), [@piosz](https://github.com/piosz)) @@ -1943,7 +3459,6 @@ filename | sha256 hash * - gcr.io/google-containers/etcd-empty-dir-cleanup * - gcr.io/google-containers/kube-dnsmasq-amd64 * Check if pathExists before performing Unmount ([#39311](https://github.com/kubernetes/kubernetes/pull/39311), [@rkouj](https://github.com/rkouj)) -* NONE ([#39768](https://github.com/kubernetes/kubernetes/pull/39768), [@rkouj](https://github.com/rkouj)) * Unmount operation should not fail if volume is already unmounted ([#38547](https://github.com/kubernetes/kubernetes/pull/38547), [@rkouj](https://github.com/rkouj)) * Updates base image used for `kube-addon-manager` to latest `python:2.7-slim` and embedded `kubectl` to `v1.3.10`. No functionality changes expected. 
([#42842](https://github.com/kubernetes/kubernetes/pull/42842), [@ixdy](https://github.com/ixdy)) * list-resources: don't fail if the grep fails to match any resources ([#41933](https://github.com/kubernetes/kubernetes/pull/41933), [@ixdy](https://github.com/ixdy)) @@ -4155,7 +5670,6 @@ filename | sha256 hash * PodSecurityPolicy resource is now enabled by default in the extensions API group. ([#39743](https://github.com/kubernetes/kubernetes/pull/39743), [@pweil-](https://github.com/pweil-)) * add --controllers to controller manager ([#39740](https://github.com/kubernetes/kubernetes/pull/39740), [@deads2k](https://github.com/deads2k)) * proxy/iptables: don't sync proxy rules if services map didn't change ([#38996](https://github.com/kubernetes/kubernetes/pull/38996), [@dcbw](https://github.com/dcbw)) -* NONE ([#39768](https://github.com/kubernetes/kubernetes/pull/39768), [@rkouj](https://github.com/rkouj)) * Update amd64 kube-proxy base image to debian-iptables-amd64:v5 ([#39725](https://github.com/kubernetes/kubernetes/pull/39725), [@ixdy](https://github.com/ixdy)) * Update dashboard version to v1.5.1 ([#39662](https://github.com/kubernetes/kubernetes/pull/39662), [@rf232](https://github.com/rf232)) * Fix kubectl get -f -o so it prints all items in the file ([#39038](https://github.com/kubernetes/kubernetes/pull/39038), [@ncdc](https://github.com/ncdc)) @@ -5624,10 +7138,8 @@ binary | sha256 hash * Move push-ci-build.sh to kubernetes/release repo ([#32444](https://github.com/kubernetes/kubernetes/pull/32444), [@david-mcmahon](https://github.com/david-mcmahon)) * vendor: update github.com/coreos/go-oidc client package ([#31564](https://github.com/kubernetes/kubernetes/pull/31564), [@ericchiang](https://github.com/ericchiang)) * Fixed an issue that caused a credential error when deploying federation control plane onto a GKE cluster. 
([#31747](https://github.com/kubernetes/kubernetes/pull/31747), [@madhusudancs](https://github.com/madhusudancs)) -* NONE ([#32229](https://github.com/kubernetes/kubernetes/pull/32229), [@errordeveloper](https://github.com/errordeveloper)) * Error if a contextName is provided but not found in the kubeconfig. ([#31767](https://github.com/kubernetes/kubernetes/pull/31767), [@asalkeld](https://github.com/asalkeld)) * Use a Deployment for kube-dns ([#32018](https://github.com/kubernetes/kubernetes/pull/32018), [@MrHohn](https://github.com/MrHohn)) -* NONE ([#32067](https://github.com/kubernetes/kubernetes/pull/32067), [@erikh](https://github.com/erikh)) * Support graceful termination in kube-dns ([#31894](https://github.com/kubernetes/kubernetes/pull/31894), [@MrHohn](https://github.com/MrHohn)) * When prompting for passwords, don't echo to the terminal ([#31586](https://github.com/kubernetes/kubernetes/pull/31586), [@brendandburns](https://github.com/brendandburns)) * add group prefix matching for kubectl usage ([#32140](https://github.com/kubernetes/kubernetes/pull/32140), [@deads2k](https://github.com/deads2k)) @@ -6308,7 +7820,6 @@ binary | sha256 hash * allow group impersonation ([#30803](https://github.com/kubernetes/kubernetes/pull/30803), [@deads2k](https://github.com/deads2k)) * Always return command output for exec probes and kubelet RunInContainer ([#30731](https://github.com/kubernetes/kubernetes/pull/30731), [@ncdc](https://github.com/ncdc)) * Enable the garbage collector by default ([#30480](https://github.com/kubernetes/kubernetes/pull/30480), [@caesarxuchao](https://github.com/caesarxuchao)) -* NONE ([#30599](https://github.com/kubernetes/kubernetes/pull/30599), [@therc](https://github.com/therc)) * use valid_resources to replace kubectl.PossibleResourceTypes ([#30955](https://github.com/kubernetes/kubernetes/pull/30955), [@lojies](https://github.com/lojies)) * oidc auth provider: don't trim issuer URL 
([#30944](https://github.com/kubernetes/kubernetes/pull/30944), [@ericchiang](https://github.com/ericchiang)) * Add a short `-n` for `kubectl --namespace` ([#30630](https://github.com/kubernetes/kubernetes/pull/30630), [@silasbw](https://github.com/silasbw)) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 8238ba1ea3c..89904f1a9b6 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -30,61 +30,71 @@ }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/compute", - "Comment": "v7.0.1-beta", - "Rev": "0984e0641ae43b89283223034574d6465be93bf4" + "Comment": "v10.0.4-beta-1-g786cc84", + "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/containerregistry", - "Comment": "v7.0.1-beta", - "Rev": "0984e0641ae43b89283223034574d6465be93bf4" + "Comment": "v10.0.4-beta-1-g786cc84", + "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/disk", + "Comment": "v10.0.4-beta-1-g786cc84", + "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/network", - "Comment": "v7.0.1-beta", - "Rev": "0984e0641ae43b89283223034574d6465be93bf4" + "Comment": "v10.0.4-beta-1-g786cc84", + "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/storage", - "Comment": "v7.0.1-beta", - "Rev": "0984e0641ae43b89283223034574d6465be93bf4" + "Comment": "v10.0.4-beta-1-g786cc84", + "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", - "Comment": "v7.0.1-beta", - "Rev": "0984e0641ae43b89283223034574d6465be93bf4" + "Comment": "v10.0.4-beta-1-g786cc84", + "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" }, { "ImportPath": "github.com/Azure/go-ansiterm", - "Rev": "70b2c90b260171e829f1ebd7c17f600c11858dbe" + "Rev": "fa152c58bc15761d0200cb75fe958b89a9d4888e" }, { "ImportPath": 
"github.com/Azure/go-ansiterm/winterm", - "Rev": "70b2c90b260171e829f1ebd7c17f600c11858dbe" + "Rev": "fa152c58bc15761d0200cb75fe958b89a9d4888e" }, { "ImportPath": "github.com/Azure/go-autorest/autorest", - "Comment": "v7.2.3", - "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca" + "Comment": "v8.0.0", + "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" + }, + { + "ImportPath": "github.com/Azure/go-autorest/autorest/adal", + "Comment": "v8.0.0", + "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/azure", - "Comment": "v7.2.3", - "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca" + "Comment": "v8.0.0", + "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/date", - "Comment": "v7.2.3", - "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca" + "Comment": "v8.0.0", + "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/to", - "Comment": "v7.2.3", - "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca" + "Comment": "v8.0.0", + "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/validation", - "Comment": "v7.2.3", - "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca" + "Comment": "v8.0.0", + "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" }, { "ImportPath": "github.com/MakeNowJust/heredoc", @@ -95,6 +105,10 @@ "Comment": "v0.4.2", "Rev": "f533f7a102197536779ea3a8cb881d639e21ec5a" }, + { + "ImportPath": "github.com/NYTimes/gziphandler", + "Rev": "56545f4a5d46df9a6648819d1664c3a03a13ffdb" + }, { "ImportPath": "github.com/PuerkitoBio/purell", "Comment": "v1.0.0", @@ -411,7 +425,7 @@ }, { "ImportPath": "github.com/codegangsta/negroni", - "Comment": "v0.1-62-g8d75e11", + "Comment": "v0.1.0-62-g8d75e11", "Rev": "8d75e11374a1928608c906fe745b538483e7aeb2" }, { @@ -810,7 +824,8 @@ }, { "ImportPath": "github.com/davecgh/go-spew/spew", - "Rev": 
"5215b55f46b2b919f50a1df0eaa5886afe4e3b3d" + "Comment": "v1.1.0-1-g782f496", + "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" }, { "ImportPath": "github.com/daviddengcn/go-colortext", @@ -831,50 +846,130 @@ "Comment": "v2.4.0-rc.1-38-gcd27f179", "Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51" }, + { + "ImportPath": "github.com/docker/docker/api/types", + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" + }, + { + "ImportPath": "github.com/docker/docker/api/types/blkiodev", + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" + }, + { + "ImportPath": "github.com/docker/docker/api/types/container", + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" + }, + { + "ImportPath": "github.com/docker/docker/api/types/events", + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" + }, + { + "ImportPath": "github.com/docker/docker/api/types/filters", + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" + }, + { + "ImportPath": "github.com/docker/docker/api/types/mount", + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" + }, + { + "ImportPath": "github.com/docker/docker/api/types/network", + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" + }, + { + "ImportPath": "github.com/docker/docker/api/types/reference", + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" + }, + { + "ImportPath": "github.com/docker/docker/api/types/registry", + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" + }, + { + "ImportPath": "github.com/docker/docker/api/types/strslice", + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" + }, + { + "ImportPath": "github.com/docker/docker/api/types/swarm", + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" + }, + { + "ImportPath": 
"github.com/docker/docker/api/types/time", + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" + }, + { + "ImportPath": "github.com/docker/docker/api/types/versions", + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" + }, + { + "ImportPath": "github.com/docker/docker/api/types/volume", + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" + }, + { + "ImportPath": "github.com/docker/docker/client", + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" + }, { "ImportPath": "github.com/docker/docker/pkg/jsonlog", - "Comment": "v1.11.2", - "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" }, { "ImportPath": "github.com/docker/docker/pkg/jsonmessage", - "Comment": "v1.11.2", - "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" }, { "ImportPath": "github.com/docker/docker/pkg/longpath", - "Comment": "v1.11.2", - "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" }, { "ImportPath": "github.com/docker/docker/pkg/mount", - "Comment": "v1.11.2", - "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" }, { "ImportPath": "github.com/docker/docker/pkg/stdcopy", - "Comment": "v1.11.2", - "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" }, { "ImportPath": "github.com/docker/docker/pkg/symlink", - "Comment": "v1.11.2", - "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" }, { "ImportPath": "github.com/docker/docker/pkg/system", - "Comment": "v1.11.2", - "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" + 
"Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" }, { "ImportPath": "github.com/docker/docker/pkg/term", - "Comment": "v1.11.2", - "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" }, { "ImportPath": "github.com/docker/docker/pkg/term/windows", - "Comment": "v1.11.2", - "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" + }, + { + "ImportPath": "github.com/docker/docker/pkg/tlsconfig", + "Comment": "v1.13.1-rc2", + "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1" }, { "ImportPath": "github.com/docker/engine-api/client", @@ -1683,18 +1778,18 @@ }, { "ImportPath": "github.com/heketi/heketi/client/api/go-client", - "Comment": "v4.0.0-22-g7a54b6f", - "Rev": "7a54b6fc903feab1e7cb6573177ca09b544eb1e2" + "Comment": "v4.0.0-95-gaaf4061", + "Rev": "aaf40619d85fda757e7a1c1ea1b5118cea65594b" }, { "ImportPath": "github.com/heketi/heketi/pkg/glusterfs/api", - "Comment": "v4.0.0-22-g7a54b6f", - "Rev": "7a54b6fc903feab1e7cb6573177ca09b544eb1e2" + "Comment": "v4.0.0-95-gaaf4061", + "Rev": "aaf40619d85fda757e7a1c1ea1b5118cea65594b" }, { "ImportPath": "github.com/heketi/heketi/pkg/utils", - "Comment": "v4.0.0-22-g7a54b6f", - "Rev": "7a54b6fc903feab1e7cb6573177ca09b544eb1e2" + "Comment": "v4.0.0-95-gaaf4061", + "Rev": "aaf40619d85fda757e7a1c1ea1b5118cea65594b" }, { "ImportPath": "github.com/howeyc/gopass", @@ -1820,31 +1915,6 @@ "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil", "Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a" }, - { - "ImportPath": "github.com/mesos/mesos-go/detector", - "Comment": "before-0.26-protos-33-g45c8b08", - "Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0" - }, - { - "ImportPath": "github.com/mesos/mesos-go/detector/zoo", - "Comment": "before-0.26-protos-33-g45c8b08", - "Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0" - }, - { - "ImportPath": 
"github.com/mesos/mesos-go/mesosproto", - "Comment": "before-0.26-protos-33-g45c8b08", - "Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0" - }, - { - "ImportPath": "github.com/mesos/mesos-go/mesosutil", - "Comment": "before-0.26-protos-33-g45c8b08", - "Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0" - }, - { - "ImportPath": "github.com/mesos/mesos-go/upid", - "Comment": "before-0.26-protos-33-g45c8b08", - "Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0" - }, { "ImportPath": "github.com/miekg/coredns/middleware/etcd/msg", "Comment": "v003", @@ -2077,82 +2147,82 @@ }, { "ImportPath": "github.com/opencontainers/runc/libcontainer", - "Comment": "v1.0.0-rc2-49-gd223e2ad", + "Comment": "v1.0.0-rc2-49-gd223e2a", "Rev": "d223e2adae83f62d58448a799a5da05730228089" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/apparmor", - "Comment": "v1.0.0-rc2-49-gd223e2ad", + "Comment": "v1.0.0-rc2-49-gd223e2a", "Rev": "d223e2adae83f62d58448a799a5da05730228089" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups", - "Comment": "v1.0.0-rc2-49-gd223e2ad", + "Comment": "v1.0.0-rc2-49-gd223e2a", "Rev": "d223e2adae83f62d58448a799a5da05730228089" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs", - "Comment": "v1.0.0-rc2-49-gd223e2ad", + "Comment": "v1.0.0-rc2-49-gd223e2a", "Rev": "d223e2adae83f62d58448a799a5da05730228089" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/systemd", - "Comment": "v1.0.0-rc2-49-gd223e2ad", + "Comment": "v1.0.0-rc2-49-gd223e2a", "Rev": "d223e2adae83f62d58448a799a5da05730228089" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/configs", - "Comment": "v1.0.0-rc2-49-gd223e2ad", + "Comment": "v1.0.0-rc2-49-gd223e2a", "Rev": "d223e2adae83f62d58448a799a5da05730228089" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/configs/validate", - "Comment": "v1.0.0-rc2-49-gd223e2ad", + "Comment": "v1.0.0-rc2-49-gd223e2a", "Rev": 
"d223e2adae83f62d58448a799a5da05730228089" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/criurpc", - "Comment": "v1.0.0-rc2-49-gd223e2ad", + "Comment": "v1.0.0-rc2-49-gd223e2a", "Rev": "d223e2adae83f62d58448a799a5da05730228089" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/keys", - "Comment": "v1.0.0-rc2-49-gd223e2ad", + "Comment": "v1.0.0-rc2-49-gd223e2a", "Rev": "d223e2adae83f62d58448a799a5da05730228089" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/label", - "Comment": "v1.0.0-rc2-49-gd223e2ad", + "Comment": "v1.0.0-rc2-49-gd223e2a", "Rev": "d223e2adae83f62d58448a799a5da05730228089" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/seccomp", - "Comment": "v1.0.0-rc2-49-gd223e2ad", + "Comment": "v1.0.0-rc2-49-gd223e2a", "Rev": "d223e2adae83f62d58448a799a5da05730228089" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/selinux", - "Comment": "v1.0.0-rc2-49-gd223e2ad", + "Comment": "v1.0.0-rc2-49-gd223e2a", "Rev": "d223e2adae83f62d58448a799a5da05730228089" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/stacktrace", - "Comment": "v1.0.0-rc2-49-gd223e2ad", + "Comment": "v1.0.0-rc2-49-gd223e2a", "Rev": "d223e2adae83f62d58448a799a5da05730228089" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/system", - "Comment": "v1.0.0-rc2-49-gd223e2ad", + "Comment": "v1.0.0-rc2-49-gd223e2a", "Rev": "d223e2adae83f62d58448a799a5da05730228089" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/user", - "Comment": "v1.0.0-rc2-49-gd223e2ad", + "Comment": "v1.0.0-rc2-49-gd223e2a", "Rev": "d223e2adae83f62d58448a799a5da05730228089" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/utils", - "Comment": "v1.0.0-rc2-49-gd223e2ad", + "Comment": "v1.0.0-rc2-49-gd223e2a", "Rev": "d223e2adae83f62d58448a799a5da05730228089" }, { @@ -2161,6 +2231,7 @@ }, { "ImportPath": "github.com/pelletier/go-buffruneio", + "Comment": "v0.1.0", "Rev": 
"df1e16fde7fc330a0ca68167c23bf7ed6ac31d6d" }, { @@ -2218,7 +2289,7 @@ }, { "ImportPath": "github.com/quobyte/api", - "Rev": "bf713b5a4333f44504fa1ce63690de45cfed6413" + "Rev": "cb10db90715b14d4784465d2fa3b915dfacc0628" }, { "ImportPath": "github.com/rackspace/gophercloud", @@ -2350,8 +2421,9 @@ "Rev": "300106c228d52c8941d4b3de6054a6062a86dda3" }, { - "ImportPath": "github.com/samuel/go-zookeeper/zk", - "Rev": "177002e16a0061912f02377e2dd8951a8b3551bc" + "ImportPath": "github.com/satori/uuid", + "Comment": "v1.1.0-8-g5bf94b6", + "Rev": "5bf94b69c6b68ee1b541973bb8e1144db23a194b" }, { "ImportPath": "github.com/seccomp/libseccomp-golang", @@ -2423,18 +2495,18 @@ }, { "ImportPath": "github.com/stretchr/testify/assert", - "Comment": "v1.0-88-ge3a8ff8", - "Rev": "e3a8ff8ce36581f87a15341206f205b1da467059" + "Comment": "v1.1.4-66-gf6abca5", + "Rev": "f6abca593680b2315d2075e0f5e2a9751e3f431a" }, { "ImportPath": "github.com/stretchr/testify/mock", - "Comment": "v1.0-88-ge3a8ff8", - "Rev": "e3a8ff8ce36581f87a15341206f205b1da467059" + "Comment": "v1.1.4-66-gf6abca5", + "Rev": "f6abca593680b2315d2075e0f5e2a9751e3f431a" }, { "ImportPath": "github.com/stretchr/testify/require", - "Comment": "v1.0-88-ge3a8ff8", - "Rev": "e3a8ff8ce36581f87a15341206f205b1da467059" + "Comment": "v1.1.4-66-gf6abca5", + "Rev": "f6abca593680b2315d2075e0f5e2a9751e3f431a" }, { "ImportPath": "github.com/syndtr/gocapability/capability", @@ -2568,6 +2640,7 @@ }, { "ImportPath": "github.com/xiang90/probing", + "Comment": "0.0.1", "Rev": "07dd2e8dfe18522e9c447ba95f2fe95262f63bb2" }, { @@ -2779,10 +2852,18 @@ "ImportPath": "golang.org/x/tools/container/intsets", "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" }, + { + "ImportPath": "google.golang.org/api/cloudkms/v1", + "Rev": "e3824ed33c72bf7e81da0286772c34b987520914" + }, { "ImportPath": "google.golang.org/api/cloudmonitoring/v2beta2", "Rev": "e3824ed33c72bf7e81da0286772c34b987520914" }, + { + "ImportPath": "google.golang.org/api/compute/v0.alpha", + "Rev": 
"e3824ed33c72bf7e81da0286772c34b987520914" + }, { "ImportPath": "google.golang.org/api/compute/v0.beta", "Rev": "e3824ed33c72bf7e81da0286772c34b987520914" @@ -2870,22 +2951,27 @@ }, { "ImportPath": "gopkg.in/gcfg.v1", + "Comment": "v1.0.0", "Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e" }, { "ImportPath": "gopkg.in/gcfg.v1/scanner", + "Comment": "v1.0.0", "Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e" }, { "ImportPath": "gopkg.in/gcfg.v1/token", + "Comment": "v1.0.0", "Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e" }, { "ImportPath": "gopkg.in/gcfg.v1/types", + "Comment": "v1.0.0", "Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e" }, { "ImportPath": "gopkg.in/inf.v0", + "Comment": "v0.9.0", "Rev": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" }, { diff --git a/Godeps/LICENSES b/Godeps/LICENSES index 72a219de0c1..b3ccbe68fd3 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -8952,6 +8952,216 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/Azure/azure-sdk-for-go/arm/disk licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2016 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/Azure/azure-sdk-for-go/LICENSE cce6fd055830ca30ff78fdf077e870d6 - +================================================================================ + + ================================================================================ = vendor/github.com/Azure/azure-sdk-for-go/arm/network licensed under: = @@ -9839,6 +10049,205 @@ THE SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/Azure/go-autorest/autorest/adal licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/Azure/go-autorest/LICENSE a250e5ac3848f2acadb5adcb9555c18b - +================================================================================ + + ================================================================================ = vendor/github.com/Azure/go-autorest/autorest/azure licensed under: = @@ -28490,7 +28899,9 @@ SOFTWARE. ================================================================================ = vendor/github.com/davecgh/go-spew/spew licensed under: = -Copyright (c) 2012-2013 Dave Collins +ISC License + +Copyright (c) 2012-2016 Dave Collins Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above @@ -28504,7 +28915,7 @@ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -= vendor/github.com/davecgh/go-spew/LICENSE 8bc888171b6c073957745dfa153bd402 - += vendor/github.com/davecgh/go-spew/LICENSE 818c0a1d81cfcfdb7ecd58db268bab7e - ================================================================================ @@ -29006,6 +29417,2991 @@ Apache License ================================================================================ +================================================================================ += vendor/github.com/docker/docker/api/types licensed under: = + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/docker/docker/LICENSE aadc30f9c14d876ded7bedc0afd2d3d7 - +================================================================================ + + +================================================================================ += vendor/github.com/docker/docker/api/types/blkiodev licensed under: = + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/docker/docker/LICENSE aadc30f9c14d876ded7bedc0afd2d3d7 - +================================================================================ + + +================================================================================ += vendor/github.com/docker/docker/api/types/container licensed under: = + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/docker/docker/LICENSE aadc30f9c14d876ded7bedc0afd2d3d7 - +================================================================================ + + +================================================================================ += vendor/github.com/docker/docker/api/types/events licensed under: = + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/docker/docker/LICENSE aadc30f9c14d876ded7bedc0afd2d3d7 - +================================================================================ + + +================================================================================ += vendor/github.com/docker/docker/api/types/filters licensed under: = + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/docker/docker/LICENSE aadc30f9c14d876ded7bedc0afd2d3d7 - +================================================================================ + + +================================================================================ += vendor/github.com/docker/docker/api/types/mount licensed under: = + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/docker/docker/LICENSE aadc30f9c14d876ded7bedc0afd2d3d7 - +================================================================================ + + +================================================================================ += vendor/github.com/docker/docker/api/types/network licensed under: = + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/docker/docker/LICENSE aadc30f9c14d876ded7bedc0afd2d3d7 - +================================================================================ + + +================================================================================ += vendor/github.com/docker/docker/api/types/reference licensed under: = + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/docker/docker/LICENSE aadc30f9c14d876ded7bedc0afd2d3d7 - +================================================================================ + + +================================================================================ += vendor/github.com/docker/docker/api/types/registry licensed under: = + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/docker/docker/LICENSE aadc30f9c14d876ded7bedc0afd2d3d7 - +================================================================================ + + +================================================================================ += vendor/github.com/docker/docker/api/types/strslice licensed under: = + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/docker/docker/LICENSE aadc30f9c14d876ded7bedc0afd2d3d7 - +================================================================================ + + +================================================================================ += vendor/github.com/docker/docker/api/types/swarm licensed under: = + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/docker/docker/LICENSE aadc30f9c14d876ded7bedc0afd2d3d7 - +================================================================================ + + +================================================================================ += vendor/github.com/docker/docker/api/types/time licensed under: = + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/docker/docker/LICENSE aadc30f9c14d876ded7bedc0afd2d3d7 - +================================================================================ + + +================================================================================ += vendor/github.com/docker/docker/api/types/versions licensed under: = + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/docker/docker/LICENSE aadc30f9c14d876ded7bedc0afd2d3d7 - +================================================================================ + + +================================================================================ += vendor/github.com/docker/docker/api/types/volume licensed under: = + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/docker/docker/LICENSE aadc30f9c14d876ded7bedc0afd2d3d7 - +================================================================================ + + +================================================================================ += vendor/github.com/docker/docker/client licensed under: = + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/docker/docker/LICENSE aadc30f9c14d876ded7bedc0afd2d3d7 - +================================================================================ + + ================================================================================ = vendor/github.com/docker/docker/pkg/jsonlog licensed under: = @@ -30797,6 +34193,205 @@ Apache License ================================================================================ +================================================================================ += vendor/github.com/docker/docker/pkg/tlsconfig licensed under: = + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/docker/docker/LICENSE aadc30f9c14d876ded7bedc0afd2d3d7 - +================================================================================ + + ================================================================================ = vendor/github.com/docker/engine-api/client licensed under: = @@ -62901,1046 +66496,6 @@ THE SOFTWARE. ================================================================================ -================================================================================ -= vendor/github.com/mesos/mesos-go/detector licensed under: = - -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -= vendor/github.com/mesos/mesos-go/LICENSE 6c4db32a2fa8717faffa1d4f10136f47 - -================================================================================ - - -================================================================================ -= vendor/github.com/mesos/mesos-go/detector/zoo licensed under: = - -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -= vendor/github.com/mesos/mesos-go/LICENSE 6c4db32a2fa8717faffa1d4f10136f47 - -================================================================================ - - -================================================================================ -= vendor/github.com/mesos/mesos-go/mesosproto licensed under: = - -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -= vendor/github.com/mesos/mesos-go/LICENSE 6c4db32a2fa8717faffa1d4f10136f47 - -================================================================================ - - -================================================================================ -= vendor/github.com/mesos/mesos-go/mesosutil licensed under: = - -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -= vendor/github.com/mesos/mesos-go/LICENSE 6c4db32a2fa8717faffa1d4f10136f47 - -================================================================================ - - -================================================================================ -= vendor/github.com/mesos/mesos-go/upid licensed under: = - -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -= vendor/github.com/mesos/mesos-go/LICENSE 6c4db32a2fa8717faffa1d4f10136f47 - -================================================================================ - - ================================================================================ = vendor/github.com/Microsoft/go-winio licensed under: = @@ -64587,6 +67142,27 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ +================================================================================ += vendor/github.com/NYTimes/gziphandler licensed under: = + +Copyright (c) 2015 The New York Times Company + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this library except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ += vendor/github.com/NYTimes/gziphandler/LICENSE.md e30b94cbe70132b181f72f953fbb3c82 - +================================================================================ + + ================================================================================ = vendor/github.com/onsi/ginkgo licensed under: = @@ -75334,35 +77910,30 @@ Blackfriday is distributed under the Simplified BSD License: ================================================================================ -= vendor/github.com/samuel/go-zookeeper/zk licensed under: = += vendor/github.com/satori/uuid licensed under: = -Copyright (c) 2013, Samuel Stauffer -All rights reserved. +Copyright (C) 2013-2016 by Maxim Bublis -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: -* Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. -* Neither the name of the author nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -= vendor/github.com/samuel/go-zookeeper/LICENSE 0d3bff996e9a8f99d8ba45af7c9f6da7 - += vendor/github.com/satori/uuid/LICENSE 02d5d17de0c82a23a09863acccc026f6 - ================================================================================ @@ -84080,6 +86651,41 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ +================================================================================ += vendor/google.golang.org/api/cloudkms/v1 licensed under: = + +Copyright (c) 2011 Google Inc. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + += vendor/google.golang.org/api/LICENSE a651bb3d8b1c412632e28823bb432b40 - +================================================================================ + + ================================================================================ = vendor/google.golang.org/api/cloudmonitoring/v2beta2 licensed under: = @@ -84115,6 +86721,41 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
================================================================================ +================================================================================ += vendor/google.golang.org/api/compute/v0.alpha licensed under: = + +Copyright (c) 2011 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ += vendor/google.golang.org/api/LICENSE a651bb3d8b1c412632e28823bb432b40 - +================================================================================ + + ================================================================================ = vendor/google.golang.org/api/compute/v0.beta licensed under: = diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index ce21e91f747..d5bd1f81b47 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -3,11 +3,13 @@ aliases: - davidopp - timothysc - wojtek-t + - k82cn sig-scheduling: - davidopp + - bsalamat - timothysc - wojtek-t - - k82cn + - k82cn - jayunit100 sig-cli-maintainers: - adohe @@ -52,7 +54,7 @@ aliases: - pmorie - resouer - sjpotter - - timstclair + - tallclair - tmrts - vishh - yifan-gu diff --git a/api/OWNERS b/api/OWNERS index 3a9b0c6d159..6d85ec75f9b 100644 --- a/api/OWNERS +++ b/api/OWNERS @@ -17,7 +17,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - davidopp @@ -33,7 +32,7 @@ reviewers: - pwittrock - roberthbailey - ncdc -- timstclair +- tallclair - yifan-gu - eparis - mwielgus diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 04fc9ec271a..af0a6b5149f 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -33551,450 +33551,6 @@ } ] }, - "/apis/extensions/v1beta1/thirdpartyresources": { - "get": { - "description": "list or watch objects of kind ThirdPartyResource", - "consumes": [ - "*/*" - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf", - "application/json;stream=watch", - "application/vnd.kubernetes.protobuf;stream=watch" - ], - "schemes": [ - "https" - ], - "tags": [ - "extensions_v1beta1" - ], - "operationId": "listExtensionsV1beta1ThirdPartyResource", - "parameters": [ - { - "uniqueItems": true, - "type": "string", - "description": "A selector to restrict the list of returned objects by their fields. 
Defaults to everything.", - "name": "fieldSelector", - "in": "query" - }, - { - "uniqueItems": true, - "type": "boolean", - "description": "If true, partially initialized resources are included in the response.", - "name": "includeUninitialized", - "in": "query" - }, - { - "uniqueItems": true, - "type": "string", - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "name": "labelSelector", - "in": "query" - }, - { - "uniqueItems": true, - "type": "string", - "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "name": "resourceVersion", - "in": "query" - }, - { - "uniqueItems": true, - "type": "integer", - "description": "Timeout for the list/watch call.", - "name": "timeoutSeconds", - "in": "query" - }, - { - "uniqueItems": true, - "type": "boolean", - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion.", - "name": "watch", - "in": "query" - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.extensions.v1beta1.ThirdPartyResourceList" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "x-kubernetes-action": "list", - "x-kubernetes-group-version-kind": { - "group": "extensions", - "version": "v1beta1", - "kind": "ThirdPartyResource" - } - }, - "post": { - "description": "create a ThirdPartyResource", - "consumes": [ - "*/*" - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "schemes": [ - "https" - ], - "tags": [ - "extensions_v1beta1" - ], - "operationId": "createExtensionsV1beta1ThirdPartyResource", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.k8s.api.extensions.v1beta1.ThirdPartyResource" - } - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.extensions.v1beta1.ThirdPartyResource" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "x-kubernetes-action": "post", - "x-kubernetes-group-version-kind": { - "group": "extensions", - "version": "v1beta1", - "kind": "ThirdPartyResource" - } - }, - "delete": { - "description": "delete collection of ThirdPartyResource", - "consumes": [ - "*/*" - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "schemes": [ - "https" - ], - "tags": [ - "extensions_v1beta1" - ], - "operationId": "deleteExtensionsV1beta1CollectionThirdPartyResource", - "parameters": [ - { - "uniqueItems": true, - "type": "string", - "description": "A selector to restrict the list of returned objects by their fields. 
Defaults to everything.", - "name": "fieldSelector", - "in": "query" - }, - { - "uniqueItems": true, - "type": "boolean", - "description": "If true, partially initialized resources are included in the response.", - "name": "includeUninitialized", - "in": "query" - }, - { - "uniqueItems": true, - "type": "string", - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "name": "labelSelector", - "in": "query" - }, - { - "uniqueItems": true, - "type": "string", - "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "name": "resourceVersion", - "in": "query" - }, - { - "uniqueItems": true, - "type": "integer", - "description": "Timeout for the list/watch call.", - "name": "timeoutSeconds", - "in": "query" - }, - { - "uniqueItems": true, - "type": "boolean", - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion.", - "name": "watch", - "in": "query" - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "x-kubernetes-action": "deletecollection", - "x-kubernetes-group-version-kind": { - "group": "extensions", - "version": "v1beta1", - "kind": "ThirdPartyResource" - } - }, - "parameters": [ - { - "uniqueItems": true, - "type": "string", - "description": "If 'true', then the output is pretty printed.", - "name": "pretty", - "in": "query" - } - ] - }, - "/apis/extensions/v1beta1/thirdpartyresources/{name}": { - "get": { - "description": "read the specified ThirdPartyResource", - "consumes": [ - "*/*" - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "schemes": [ - "https" - ], - "tags": [ - "extensions_v1beta1" - ], - "operationId": "readExtensionsV1beta1ThirdPartyResource", - "parameters": [ - { - "uniqueItems": true, - "type": "boolean", - "description": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.", - "name": "exact", - "in": "query" - }, - { - "uniqueItems": true, - "type": "boolean", - "description": "Should this value be exported. 
Export strips fields that a user can not specify.", - "name": "export", - "in": "query" - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.extensions.v1beta1.ThirdPartyResource" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "x-kubernetes-action": "get", - "x-kubernetes-group-version-kind": { - "group": "extensions", - "version": "v1beta1", - "kind": "ThirdPartyResource" - } - }, - "put": { - "description": "replace the specified ThirdPartyResource", - "consumes": [ - "*/*" - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "schemes": [ - "https" - ], - "tags": [ - "extensions_v1beta1" - ], - "operationId": "replaceExtensionsV1beta1ThirdPartyResource", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.k8s.api.extensions.v1beta1.ThirdPartyResource" - } - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.extensions.v1beta1.ThirdPartyResource" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "x-kubernetes-action": "put", - "x-kubernetes-group-version-kind": { - "group": "extensions", - "version": "v1beta1", - "kind": "ThirdPartyResource" - } - }, - "delete": { - "description": "delete a ThirdPartyResource", - "consumes": [ - "*/*" - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "schemes": [ - "https" - ], - "tags": [ - "extensions_v1beta1" - ], - "operationId": "deleteExtensionsV1beta1ThirdPartyResource", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" - } - }, - { - "uniqueItems": true, - "type": "integer", - "description": "The duration in seconds before the object should be deleted. 
Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", - "name": "gracePeriodSeconds", - "in": "query" - }, - { - "uniqueItems": true, - "type": "boolean", - "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "name": "orphanDependents", - "in": "query" - }, - { - "uniqueItems": true, - "type": "string", - "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", - "name": "propagationPolicy", - "in": "query" - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "x-kubernetes-action": "delete", - "x-kubernetes-group-version-kind": { - "group": "extensions", - "version": "v1beta1", - "kind": "ThirdPartyResource" - } - }, - "patch": { - "description": "partially update the specified ThirdPartyResource", - "consumes": [ - "application/json-patch+json", - "application/merge-patch+json", - "application/strategic-merge-patch+json" - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "schemes": [ - "https" - ], - "tags": [ - "extensions_v1beta1" - ], - "operationId": "patchExtensionsV1beta1ThirdPartyResource", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": 
"#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.extensions.v1beta1.ThirdPartyResource" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "x-kubernetes-action": "patch", - "x-kubernetes-group-version-kind": { - "group": "extensions", - "version": "v1beta1", - "kind": "ThirdPartyResource" - } - }, - "parameters": [ - { - "uniqueItems": true, - "type": "string", - "description": "name of the ThirdPartyResource", - "name": "name", - "in": "path", - "required": true - }, - { - "uniqueItems": true, - "type": "string", - "description": "If 'true', then the output is pretty printed.", - "name": "pretty", - "in": "query" - } - ] - }, "/apis/extensions/v1beta1/watch/daemonsets": { "get": { "description": "watch individual changes to a list of DaemonSet", @@ -35653,194 +35209,6 @@ } ] }, - "/apis/extensions/v1beta1/watch/thirdpartyresources": { - "get": { - "description": "watch individual changes to a list of ThirdPartyResource", - "consumes": [ - "*/*" - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf", - "application/json;stream=watch", - "application/vnd.kubernetes.protobuf;stream=watch" - ], - "schemes": [ - "https" - ], - "tags": [ - "extensions_v1beta1" - ], - "operationId": "watchExtensionsV1beta1ThirdPartyResourceList", - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "x-kubernetes-action": "watchlist", - "x-kubernetes-group-version-kind": { - "group": "extensions", - "version": "v1beta1", - "kind": "ThirdPartyResource" - } - }, - "parameters": [ - { - "uniqueItems": true, - "type": "string", - "description": "A selector to restrict the list of returned objects by their fields. 
Defaults to everything.", - "name": "fieldSelector", - "in": "query" - }, - { - "uniqueItems": true, - "type": "boolean", - "description": "If true, partially initialized resources are included in the response.", - "name": "includeUninitialized", - "in": "query" - }, - { - "uniqueItems": true, - "type": "string", - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "name": "labelSelector", - "in": "query" - }, - { - "uniqueItems": true, - "type": "string", - "description": "If 'true', then the output is pretty printed.", - "name": "pretty", - "in": "query" - }, - { - "uniqueItems": true, - "type": "string", - "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "name": "resourceVersion", - "in": "query" - }, - { - "uniqueItems": true, - "type": "integer", - "description": "Timeout for the list/watch call.", - "name": "timeoutSeconds", - "in": "query" - }, - { - "uniqueItems": true, - "type": "boolean", - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion.", - "name": "watch", - "in": "query" - } - ] - }, - "/apis/extensions/v1beta1/watch/thirdpartyresources/{name}": { - "get": { - "description": "watch changes to an object of kind ThirdPartyResource", - "consumes": [ - "*/*" - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf", - "application/json;stream=watch", - "application/vnd.kubernetes.protobuf;stream=watch" - ], - "schemes": [ - "https" - ], - "tags": [ - "extensions_v1beta1" - ], - "operationId": "watchExtensionsV1beta1ThirdPartyResource", - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "x-kubernetes-action": "watch", - "x-kubernetes-group-version-kind": { - "group": "extensions", - "version": "v1beta1", - "kind": "ThirdPartyResource" - } - }, - "parameters": [ - { - "uniqueItems": true, - "type": "string", - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "name": "fieldSelector", - "in": "query" - }, - { - "uniqueItems": true, - "type": "boolean", - "description": "If true, partially initialized resources are included in the response.", - "name": "includeUninitialized", - "in": "query" - }, - { - "uniqueItems": true, - "type": "string", - "description": "A selector to restrict the list of returned objects by their labels. 
Defaults to everything.", - "name": "labelSelector", - "in": "query" - }, - { - "uniqueItems": true, - "type": "string", - "description": "name of the ThirdPartyResource", - "name": "name", - "in": "path", - "required": true - }, - { - "uniqueItems": true, - "type": "string", - "description": "If 'true', then the output is pretty printed.", - "name": "pretty", - "in": "query" - }, - { - "uniqueItems": true, - "type": "string", - "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "name": "resourceVersion", - "in": "query" - }, - { - "uniqueItems": true, - "type": "integer", - "description": "Timeout for the list/watch call.", - "name": "timeoutSeconds", - "in": "query" - }, - { - "uniqueItems": true, - "type": "boolean", - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "name": "watch", - "in": "query" - } - ] - }, "/apis/networking.k8s.io/": { "get": { "description": "get information of a group", @@ -52779,15 +52147,6 @@ } } }, - "io.k8s.api.extensions.v1beta1.APIVersion": { - "description": "An APIVersion represents a single concrete version of an object model.", - "properties": { - "name": { - "description": "Name of this version (e.g. 
'v1').", - "type": "string" - } - } - }, "io.k8s.api.extensions.v1beta1.DaemonSet": { "description": "DaemonSet represents the configuration of a daemon set.", "properties": { @@ -54008,75 +53367,6 @@ } } }, - "io.k8s.api.extensions.v1beta1.ThirdPartyResource": { - "description": "A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource types to the API. It consists of one or more Versions of the api.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - "type": "string" - }, - "description": { - "description": "Description is the description of this object.", - "type": "string" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "description": "Standard object metadata", - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" - }, - "versions": { - "description": "Versions are versions for this third party object", - "type": "array", - "items": { - "$ref": "#/definitions/io.k8s.api.extensions.v1beta1.APIVersion" - } - } - }, - "x-kubernetes-group-version-kind": [ - { - "group": "extensions", - "version": "v1beta1", - "kind": "ThirdPartyResource" - } - ] - }, - "io.k8s.api.extensions.v1beta1.ThirdPartyResourceList": { - "description": "ThirdPartyResourceList is a list of ThirdPartyResources.", - "required": [ - "items" - ], - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - "type": "string" - }, - "items": { - "description": "Items is the list of ThirdPartyResources.", - "type": "array", - "items": { - "$ref": "#/definitions/io.k8s.api.extensions.v1beta1.ThirdPartyResource" - } - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "description": "Standard list metadata.", - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" - } - }, - "x-kubernetes-group-version-kind": [ - { - "group": "extensions", - "version": "v1beta1", - "kind": "ThirdPartyResourceList" - } - ] - }, "io.k8s.api.networking.v1.NetworkPolicy": { "description": "NetworkPolicy describes what network traffic is allowed for a set of Pods", "properties": { diff --git a/api/swagger-spec/extensions_v1beta1.json b/api/swagger-spec/extensions_v1beta1.json index 4c0f72665c5..31dd2aa0dd3 100644 --- a/api/swagger-spec/extensions_v1beta1.json +++ b/api/swagger-spec/extensions_v1beta1.json @@ -6144,621 +6144,6 @@ } ] }, - { - "path": "/apis/extensions/v1beta1/thirdpartyresources", - "description": "API at /apis/extensions/v1beta1", - "operations": [ - { - "type": "v1beta1.ThirdPartyResourceList", - "method": "GET", - "summary": "list or watch objects of kind ThirdPartyResource", - "nickname": "listThirdPartyResource", - "parameters": [ - { - "type": "string", - "paramType": "query", - "name": "pretty", - "description": "If 'true', then the output is pretty printed.", - "required": false, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "query", - "name": "labelSelector", - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "required": false, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "query", - "name": "fieldSelector", - "description": "A selector to restrict the list of returned objects by their fields. 
Defaults to everything.", - "required": false, - "allowMultiple": false - }, - { - "type": "boolean", - "paramType": "query", - "name": "includeUninitialized", - "description": "If true, partially initialized resources are included in the response.", - "required": false, - "allowMultiple": false - }, - { - "type": "boolean", - "paramType": "query", - "name": "watch", - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "required": false, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "query", - "name": "resourceVersion", - "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "required": false, - "allowMultiple": false - }, - { - "type": "integer", - "paramType": "query", - "name": "timeoutSeconds", - "description": "Timeout for the list/watch call.", - "required": false, - "allowMultiple": false - } - ], - "responseMessages": [ - { - "code": 200, - "message": "OK", - "responseModel": "v1beta1.ThirdPartyResourceList" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf", - "application/json;stream=watch", - "application/vnd.kubernetes.protobuf;stream=watch" - ], - "consumes": [ - "*/*" - ] - }, - { - "type": "v1beta1.ThirdPartyResource", - "method": "POST", - "summary": "create a ThirdPartyResource", - "nickname": "createThirdPartyResource", - "parameters": [ - { - "type": "string", - "paramType": "query", - "name": "pretty", - "description": "If 'true', then the output is pretty printed.", - 
"required": false, - "allowMultiple": false - }, - { - "type": "v1beta1.ThirdPartyResource", - "paramType": "body", - "name": "body", - "description": "", - "required": true, - "allowMultiple": false - } - ], - "responseMessages": [ - { - "code": 200, - "message": "OK", - "responseModel": "v1beta1.ThirdPartyResource" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "consumes": [ - "*/*" - ] - }, - { - "type": "v1.Status", - "method": "DELETE", - "summary": "delete collection of ThirdPartyResource", - "nickname": "deletecollectionThirdPartyResource", - "parameters": [ - { - "type": "string", - "paramType": "query", - "name": "pretty", - "description": "If 'true', then the output is pretty printed.", - "required": false, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "query", - "name": "labelSelector", - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "required": false, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "query", - "name": "fieldSelector", - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "required": false, - "allowMultiple": false - }, - { - "type": "boolean", - "paramType": "query", - "name": "includeUninitialized", - "description": "If true, partially initialized resources are included in the response.", - "required": false, - "allowMultiple": false - }, - { - "type": "boolean", - "paramType": "query", - "name": "watch", - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion.", - "required": false, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "query", - "name": "resourceVersion", - "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "required": false, - "allowMultiple": false - }, - { - "type": "integer", - "paramType": "query", - "name": "timeoutSeconds", - "description": "Timeout for the list/watch call.", - "required": false, - "allowMultiple": false - } - ], - "responseMessages": [ - { - "code": 200, - "message": "OK", - "responseModel": "v1.Status" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "consumes": [ - "*/*" - ] - } - ] - }, - { - "path": "/apis/extensions/v1beta1/watch/thirdpartyresources", - "description": "API at /apis/extensions/v1beta1", - "operations": [ - { - "type": "v1.WatchEvent", - "method": "GET", - "summary": "watch individual changes to a list of ThirdPartyResource", - "nickname": "watchThirdPartyResourceList", - "parameters": [ - { - "type": "string", - "paramType": "query", - "name": "pretty", - "description": "If 'true', then the output is pretty printed.", - "required": false, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "query", - "name": "labelSelector", - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "required": false, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "query", - "name": "fieldSelector", - "description": "A selector to restrict the list of returned objects by their fields. 
Defaults to everything.", - "required": false, - "allowMultiple": false - }, - { - "type": "boolean", - "paramType": "query", - "name": "includeUninitialized", - "description": "If true, partially initialized resources are included in the response.", - "required": false, - "allowMultiple": false - }, - { - "type": "boolean", - "paramType": "query", - "name": "watch", - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "required": false, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "query", - "name": "resourceVersion", - "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "required": false, - "allowMultiple": false - }, - { - "type": "integer", - "paramType": "query", - "name": "timeoutSeconds", - "description": "Timeout for the list/watch call.", - "required": false, - "allowMultiple": false - } - ], - "responseMessages": [ - { - "code": 200, - "message": "OK", - "responseModel": "v1.WatchEvent" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf", - "application/json;stream=watch", - "application/vnd.kubernetes.protobuf;stream=watch" - ], - "consumes": [ - "*/*" - ] - } - ] - }, - { - "path": "/apis/extensions/v1beta1/thirdpartyresources/{name}", - "description": "API at /apis/extensions/v1beta1", - "operations": [ - { - "type": "v1beta1.ThirdPartyResource", - "method": "GET", - "summary": "read the specified ThirdPartyResource", - "nickname": "readThirdPartyResource", - "parameters": [ - { - 
"type": "string", - "paramType": "query", - "name": "pretty", - "description": "If 'true', then the output is pretty printed.", - "required": false, - "allowMultiple": false - }, - { - "type": "boolean", - "paramType": "query", - "name": "export", - "description": "Should this value be exported. Export strips fields that a user can not specify.", - "required": false, - "allowMultiple": false - }, - { - "type": "boolean", - "paramType": "query", - "name": "exact", - "description": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.", - "required": false, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "path", - "name": "name", - "description": "name of the ThirdPartyResource", - "required": true, - "allowMultiple": false - } - ], - "responseMessages": [ - { - "code": 200, - "message": "OK", - "responseModel": "v1beta1.ThirdPartyResource" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "consumes": [ - "*/*" - ] - }, - { - "type": "v1beta1.ThirdPartyResource", - "method": "PUT", - "summary": "replace the specified ThirdPartyResource", - "nickname": "replaceThirdPartyResource", - "parameters": [ - { - "type": "string", - "paramType": "query", - "name": "pretty", - "description": "If 'true', then the output is pretty printed.", - "required": false, - "allowMultiple": false - }, - { - "type": "v1beta1.ThirdPartyResource", - "paramType": "body", - "name": "body", - "description": "", - "required": true, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "path", - "name": "name", - "description": "name of the ThirdPartyResource", - "required": true, - "allowMultiple": false - } - ], - "responseMessages": [ - { - "code": 200, - "message": "OK", - "responseModel": "v1beta1.ThirdPartyResource" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "consumes": [ - "*/*" 
- ] - }, - { - "type": "v1beta1.ThirdPartyResource", - "method": "PATCH", - "summary": "partially update the specified ThirdPartyResource", - "nickname": "patchThirdPartyResource", - "parameters": [ - { - "type": "string", - "paramType": "query", - "name": "pretty", - "description": "If 'true', then the output is pretty printed.", - "required": false, - "allowMultiple": false - }, - { - "type": "v1.Patch", - "paramType": "body", - "name": "body", - "description": "", - "required": true, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "path", - "name": "name", - "description": "name of the ThirdPartyResource", - "required": true, - "allowMultiple": false - } - ], - "responseMessages": [ - { - "code": 200, - "message": "OK", - "responseModel": "v1beta1.ThirdPartyResource" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "consumes": [ - "application/json-patch+json", - "application/merge-patch+json", - "application/strategic-merge-patch+json" - ] - }, - { - "type": "v1.Status", - "method": "DELETE", - "summary": "delete a ThirdPartyResource", - "nickname": "deleteThirdPartyResource", - "parameters": [ - { - "type": "string", - "paramType": "query", - "name": "pretty", - "description": "If 'true', then the output is pretty printed.", - "required": false, - "allowMultiple": false - }, - { - "type": "v1.DeleteOptions", - "paramType": "body", - "name": "body", - "description": "", - "required": true, - "allowMultiple": false - }, - { - "type": "integer", - "paramType": "query", - "name": "gracePeriodSeconds", - "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. 
zero means delete immediately.", - "required": false, - "allowMultiple": false - }, - { - "type": "boolean", - "paramType": "query", - "name": "orphanDependents", - "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "required": false, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "query", - "name": "propagationPolicy", - "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", - "required": false, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "path", - "name": "name", - "description": "name of the ThirdPartyResource", - "required": true, - "allowMultiple": false - } - ], - "responseMessages": [ - { - "code": 200, - "message": "OK", - "responseModel": "v1.Status" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "consumes": [ - "*/*" - ] - } - ] - }, - { - "path": "/apis/extensions/v1beta1/watch/thirdpartyresources/{name}", - "description": "API at /apis/extensions/v1beta1", - "operations": [ - { - "type": "v1.WatchEvent", - "method": "GET", - "summary": "watch changes to an object of kind ThirdPartyResource", - "nickname": "watchThirdPartyResource", - "parameters": [ - { - "type": "string", - "paramType": "query", - "name": "pretty", - "description": "If 'true', then the output is pretty printed.", - "required": false, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "query", - "name": "labelSelector", - "description": "A selector to restrict the list of returned objects by 
their labels. Defaults to everything.", - "required": false, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "query", - "name": "fieldSelector", - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "required": false, - "allowMultiple": false - }, - { - "type": "boolean", - "paramType": "query", - "name": "includeUninitialized", - "description": "If true, partially initialized resources are included in the response.", - "required": false, - "allowMultiple": false - }, - { - "type": "boolean", - "paramType": "query", - "name": "watch", - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "required": false, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "query", - "name": "resourceVersion", - "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "required": false, - "allowMultiple": false - }, - { - "type": "integer", - "paramType": "query", - "name": "timeoutSeconds", - "description": "Timeout for the list/watch call.", - "required": false, - "allowMultiple": false - }, - { - "type": "string", - "paramType": "path", - "name": "name", - "description": "name of the ThirdPartyResource", - "required": true, - "allowMultiple": false - } - ], - "responseMessages": [ - { - "code": 200, - "message": "OK", - "responseModel": "v1.WatchEvent" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf", - "application/json;stream=watch", - "application/vnd.kubernetes.protobuf;stream=watch" - ], - "consumes": [ - "*/*" - ] - } - ] - }, { "path": "/apis/extensions/v1beta1", "description": "API at /apis/extensions/v1beta1", @@ -10300,73 +9685,6 @@ } } }, - "v1beta1.ThirdPartyResourceList": { - "id": "v1beta1.ThirdPartyResourceList", - "description": "ThirdPartyResourceList is a list of ThirdPartyResources.", - "required": [ - "items" - ], - "properties": { - "kind": { - "type": "string", - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" - }, - "apiVersion": { - "type": "string", - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources" - }, - "metadata": { - "$ref": "v1.ListMeta", - "description": "Standard list metadata." - }, - "items": { - "type": "array", - "items": { - "$ref": "v1beta1.ThirdPartyResource" - }, - "description": "Items is the list of ThirdPartyResources." - } - } - }, - "v1beta1.ThirdPartyResource": { - "id": "v1beta1.ThirdPartyResource", - "description": "A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource types to the API. It consists of one or more Versions of the api.", - "properties": { - "kind": { - "type": "string", - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" - }, - "apiVersion": { - "type": "string", - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources" - }, - "metadata": { - "$ref": "v1.ObjectMeta", - "description": "Standard object metadata" - }, - "description": { - "type": "string", - "description": "Description is the description of this object." - }, - "versions": { - "type": "array", - "items": { - "$ref": "v1beta1.APIVersion" - }, - "description": "Versions are versions for this third party object" - } - } - }, - "v1beta1.APIVersion": { - "id": "v1beta1.APIVersion", - "description": "An APIVersion represents a single concrete version of an object model.", - "properties": { - "name": { - "type": "string", - "description": "Name of this version (e.g. 'v1')." 
- } - } - }, "v1.APIResourceList": { "id": "v1.APIResourceList", "description": "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.", diff --git a/build/BUILD b/build/BUILD index f87cc21e335..5f08ab29c54 100644 --- a/build/BUILD +++ b/build/BUILD @@ -21,36 +21,23 @@ filegroup( tags = ["automanaged"], ) -docker_build( - name = "busybox", - debs = [ - "@busybox_deb//file", - ], - symlinks = { - "/bin/sh": "/bin/busybox", - "/usr/bin/busybox": "/bin/busybox", - "/usr/sbin/busybox": "/bin/busybox", - "/sbin/busybox": "/bin/busybox", - }, -) - # This list should roughly match kube::build::get_docker_wrapped_binaries() # in build/common.sh. DOCKERIZED_BINARIES = { "cloud-controller-manager": { - "base": ":busybox", + "base": "@official_busybox//image:image.tar", "target": "//cmd/cloud-controller-manager:cloud-controller-manager", }, "kube-apiserver": { - "base": ":busybox", + "base": "@official_busybox//image:image.tar", "target": "//cmd/kube-apiserver:kube-apiserver", }, "kube-controller-manager": { - "base": ":busybox", + "base": "@official_busybox//image:image.tar", "target": "//cmd/kube-controller-manager:kube-controller-manager", }, "kube-scheduler": { - "base": ":busybox", + "base": "@official_busybox//image:image.tar", "target": "//plugin/cmd/kube-scheduler:kube-scheduler", }, "kube-proxy": { diff --git a/build/common.sh b/build/common.sh index 67b0a4927d8..f6def8f3e3b 100755 --- a/build/common.sh +++ b/build/common.sh @@ -384,7 +384,13 @@ function kube::build::short_hash() { # a workaround for bug https://github.com/docker/docker/issues/3968. function kube::build::destroy_container() { "${DOCKER[@]}" kill "$1" >/dev/null 2>&1 || true - "${DOCKER[@]}" wait "$1" >/dev/null 2>&1 || true + if [[ $("${DOCKER[@]}" version --format '{{.Server.Version}}') = 17.06.0* ]]; then + # Workaround https://github.com/moby/moby/issues/33948. 
+ # TODO: remove when 17.06.0 is not relevant anymore + DOCKER_API_VERSION=v1.29 "${DOCKER[@]}" wait "$1" >/dev/null 2>&1 || true + else + "${DOCKER[@]}" wait "$1" >/dev/null 2>&1 || true + fi "${DOCKER[@]}" rm -f -v "$1" >/dev/null 2>&1 || true } diff --git a/build/root/Makefile b/build/root/Makefile index 401b8c44f45..44df2b2570d 100644 --- a/build/root/Makefile +++ b/build/root/Makefile @@ -126,6 +126,21 @@ verify: verify_generated_files KUBE_VERIFY_GIT_BRANCH=$(BRANCH) hack/make-rules/verify.sh -v endif +define QUICK_VERIFY_HELP_INFO +# Runs only the presubmission verifications that aren't slow. +# +# Example: +# make quick-verify +endef +.PHONY: quick-verify +ifeq ($(PRINT_HELP),y) +quick-verify: + @echo "$$QUICK_VERIFY_HELP_INFO" +else +quick-verify: verify_generated_files + hack/make-rules/verify.sh -v -Q +endif + define UPDATE_HELP_INFO # Runs all the generated updates. # @@ -240,6 +255,11 @@ define TEST_E2E_NODE_HELP_INFO # IMAGE_SERVICE_ENDPOINT: remote image endpoint to connect to, to prepull images. # Used when RUNTIME is set to "remote". # IMAGE_CONFIG_FILE: path to a file containing image configuration. +# SYSTEM_SPEC_NAME: The name of the system spec to be used for validating the +# image in the node conformance test. The specs are located at +# test/e2e_node/system/specs/. For example, "SYSTEM_SPEC_NAME=gke" will use +# the spec at test/e2e_node/system/specs/gke.yaml. If unspecified, the +# default built-in spec (system.DefaultSpec) will be used. # # Example: # make test-e2e-node FOCUS=Kubelet SKIP=container @@ -523,6 +543,20 @@ bazel-test: bazel test --test_tag_filters=-integration --flaky_test_attempts=3 //cmd/... //pkg/... //federation/... //plugin/... //third_party/... //hack/... //hack:verify-all //vendor/k8s.io/... 
endif +ifeq ($(PRINT_HELP),y) +define BAZEL_TEST_INTEGRATION_HELP_INFO +# Integration test with bazel +# +# Example: +# make bazel-test-integration +endef +bazel-test-integration: + @echo "$$BAZEL_TEST_INTEGRATION_HELP_INFO" +else +bazel-test-integration: + bazel test //test/integration/... +endif + ifeq ($(PRINT_HELP),y) define BAZEL_BUILD_HELP_INFO # Build release tars with bazel diff --git a/build/root/WORKSPACE b/build/root/WORKSPACE index f57923a8bc6..e7f23f182c5 100644 --- a/build/root/WORKSPACE +++ b/build/root/WORKSPACE @@ -12,6 +12,16 @@ http_archive( urls = ["https://github.com/kubernetes/repo-infra/archive/9dedd5f4093884c133ad5ea73695b28338b954ab.tar.gz"], ) +ETCD_VERSION = "3.0.17" + +new_http_archive( + name = "com_coreos_etcd", + build_file = "third_party/etcd.BUILD", + sha256 = "274c46a7f8d26f7ae99d6880610f54933cbcf7f3beafa19236c52eb5df8c7a0b", + strip_prefix = "etcd-v%s-linux-amd64" % ETCD_VERSION, + urls = ["https://github.com/coreos/etcd/releases/download/v%s/etcd-v%s-linux-amd64.tar.gz" % (ETCD_VERSION, ETCD_VERSION)], +) + # This contains a patch to not prepend ./ to tarfiles produced by pkg_tar. 
# When merged upstream, we'll no longer need to use ixdy's fork: # https://bazel-review.googlesource.com/#/c/10390/ @@ -24,9 +34,9 @@ http_archive( http_archive( name = "io_bazel_rules_docker", - sha256 = "261fbd8fda1d06a12a0479019b46acd302c6aaa8df8e49383dc37917f20492a1", - strip_prefix = "rules_docker-52d9faf209ff6d16eb850b6b66d03483735e0633", - urls = ["https://github.com/bazelbuild/rules_docker/archive/52d9faf209ff6d16eb850b6b66d03483735e0633.tar.gz"], + sha256 = "40d780165c0b9fbb3ddca858df7347381af0e87e430c74863e4ce9d6f6441023", + strip_prefix = "rules_docker-8359263f35227a3634ea023ff4ae163189eb4b26", + urls = ["https://github.com/bazelbuild/rules_docker/archive/8359263f35227a3634ea023ff4ae163189eb4b26.tar.gz"], ) load("@io_bazel_rules_go//go:def.bzl", "go_repositories") @@ -38,22 +48,6 @@ go_repositories( docker_repositories() -# for building docker base images -debs = ( - ( - "busybox_deb", - "5f81f140777454e71b9e5bfdce9c89993de5ddf4a7295ea1cfda364f8f630947", - "http://ftp.us.debian.org/debian/pool/main/b/busybox/busybox-static_1.22.0-19+b3_amd64.deb", - "https://storage.googleapis.com/kubernetes-release/debs/busybox-static_1.22.0-19+b3_amd64.deb", - ), -) - -[http_file( - name = name, - sha256 = sha256, - url = url, -) for name, sha256, origin, url in debs] - http_file( name = "kubernetes_cni", sha256 = "05ab3937bc68562e989dc143362ec4d4275262ba9f359338aed720fc914457a5", @@ -62,7 +56,16 @@ http_file( docker_pull( name = "debian-iptables-amd64", - digest = "sha256:bc20977ac38abfb43071b4c61c4b7edb30af894c05eb06758dd61d05118d2842", # v7 + digest = "sha256:bc20977ac38abfb43071b4c61c4b7edb30af894c05eb06758dd61d05118d2842", registry = "gcr.io", repository = "google-containers/debian-iptables-amd64", + tag = "v7", # ignored, but kept here for documentation +) + +docker_pull( + name = "official_busybox", + digest = "sha256:be3c11fdba7cfe299214e46edc642e09514dbb9bbefcd0d3836c05a1e0cd0642", + registry = "index.docker.io", + repository = "library/busybox", + tag = 
"latest", # ignored, but kept here for documentation ) diff --git a/build/visible_to/BUILD b/build/visible_to/BUILD index 1d0dc31abb2..513da5a77e4 100644 --- a/build/visible_to/BUILD +++ b/build/visible_to/BUILD @@ -49,6 +49,7 @@ package_group( packages = [ "//test/e2e", "//test/e2e/framework", + "//test/e2e/kubectl", "//test/e2e/workload", "//test/integration/etcd", "//test/integration/framework", @@ -72,7 +73,7 @@ package_group( packages = [ "//cmd/kubeadm/app", "//cmd/kubeadm/app/cmd", - "//cmd/kubeadm/app/master", + "//cmd/kubeadm/app/phases/controlplane", ], ) diff --git a/cluster/addons/cluster-loadbalancing/glbc/README.md b/cluster/addons/cluster-loadbalancing/glbc/README.md index 0b3403e7ef8..0d9685d3496 100644 --- a/cluster/addons/cluster-loadbalancing/glbc/README.md +++ b/cluster/addons/cluster-loadbalancing/glbc/README.md @@ -42,44 +42,43 @@ spec: ``` * time, t=0 -```console -$ kubectl get ing -NAME RULE BACKEND ADDRESS -test-ingress - default-http-backend:80 -$ kubectl describe ing -No events. -``` + ```console + $ kubectl get ing + NAME RULE BACKEND ADDRESS + test-ingress - default-http-backend:80 + $ kubectl describe ing + No events. 
+ ``` * time, t=1m -```console -$ kubectl get ing -NAME RULE BACKEND ADDRESS -test-ingress - default-http-backend:80 130.211.5.27 + ```console + $ kubectl get ing + NAME RULE BACKEND ADDRESS + test-ingress - default-http-backend:80 130.211.5.27 -$ kubectl describe ing -target-proxy: k8s-tp-default-test-ingress -url-map: k8s-um-default-test-ingress -backends: {"k8s-be-32342":"UNKNOWN"} -forwarding-rule: k8s-fw-default-test-ingress -Events: - FirstSeen LastSeen Count From SubobjectPath Reason Message - ───────── ──────── ───── ──── ───────────── ────── ─────── - 46s 46s 1 {loadbalancer-controller } Success Created loadbalancer 130.211.5.27 -``` + $ kubectl describe ing + target-proxy: k8s-tp-default-test-ingress + url-map: k8s-um-default-test-ingress + backends: {"k8s-be-32342":"UNKNOWN"} + forwarding-rule: k8s-fw-default-test-ingress + Events: + FirstSeen LastSeen Count From SubobjectPath Reason Message + ───────── ──────── ───── ──── ───────────── ────── ─────── + 46s 46s 1 {loadbalancer-controller } Success Created loadbalancer 130.211.5.27 + ``` * time, t=5m -```console -$ kubectl describe ing -target-proxy: k8s-tp-default-test-ingress -url-map: k8s-um-default-test-ingress -backends: {"k8s-be-32342":"HEALTHY"} -forwarding-rule: k8s-fw-default-test-ingress -Events: - FirstSeen LastSeen Count From SubobjectPath Reason Message - ───────── ──────── ───── ──── ───────────── ────── ─────── - 46s 46s 1 {loadbalancer-controller } Success Created loadbalancer 130.211.5.27 - -``` + ```console + $ kubectl describe ing + target-proxy: k8s-tp-default-test-ingress + url-map: k8s-um-default-test-ingress + backends: {"k8s-be-32342":"HEALTHY"} + forwarding-rule: k8s-fw-default-test-ingress + Events: + FirstSeen LastSeen Count From SubobjectPath Reason Message + ───────── ──────── ───── ──── ───────────── ────── ─────── + 46s 46s 1 {loadbalancer-controller } Success Created loadbalancer 130.211.5.27 + ``` ## Disabling GLBC @@ -87,20 +86,20 @@ Since GLBC runs as a cluster addon, 
you cannot simply delete the RC. The easiest * IFF you want to tear down existing L7 loadbalancers, hit the /delete-all-and-quit endpoint on the pod: -```console -$ kubectl get pods --namespace=kube-system -NAME READY STATUS RESTARTS AGE -l7-lb-controller-7bb21 1/1 Running 0 1h -$ kubectl exec l7-lb-controller-7bb21 -c l7-lb-controller curl http://localhost:8081/delete-all-and-quit --namespace=kube-system -$ kubectl logs l7-lb-controller-7b221 -c l7-lb-controller --follow -... -I1007 00:30:00.322528 1 main.go:160] Handled quit, awaiting pod deletion. -``` + ```console + $ kubectl get pods --namespace=kube-system + NAME READY STATUS RESTARTS AGE + l7-lb-controller-7bb21 1/1 Running 0 1h + $ kubectl exec l7-lb-controller-7bb21 -c l7-lb-controller curl http://localhost:8081/delete-all-and-quit --namespace=kube-system + $ kubectl logs l7-lb-controller-7b221 -c l7-lb-controller --follow + ... + I1007 00:30:00.322528 1 main.go:160] Handled quit, awaiting pod deletion. + ``` * Nullify the RC (but don't delete it or the addon controller will "fix" it for you) -```console -$ kubectl scale rc l7-lb-controller --replicas=0 --namespace=kube-system -``` + ```console + $ kubectl scale rc l7-lb-controller --replicas=0 --namespace=kube-system + ``` ## Limitations diff --git a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml index 989b5d30f85..57d915b95bf 100644 --- a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml @@ -23,29 +23,29 @@ metadata: apiVersion: extensions/v1beta1 kind: Deployment metadata: - name: heapster-v1.4.0-beta.0 + name: heapster-v1.4.0 namespace: kube-system labels: k8s-app: heapster kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v1.4.0-beta.0 + version: v1.4.0 spec: replicas: 1 selector: matchLabels: k8s-app: heapster - version: v1.4.0-beta.0 + 
version: v1.4.0 template: metadata: labels: k8s-app: heapster - version: v1.4.0-beta.0 + version: v1.4.0 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/heapster-amd64:v1.4.0-beta.0 + - image: gcr.io/google_containers/heapster-amd64:v1.4.0 name: heapster livenessProbe: httpGet: @@ -65,7 +65,7 @@ spec: - name: usr-ca-certs mountPath: /usr/share/ca-certificates readOnly: true - - image: gcr.io/google_containers/heapster-amd64:v1.4.0-beta.0 + - image: gcr.io/google_containers/heapster-amd64:v1.4.0 name: eventer command: - /eventer @@ -78,7 +78,7 @@ spec: - name: usr-ca-certs mountPath: /usr/share/ca-certificates readOnly: true - - image: gcr.io/google_containers/addon-resizer:1.7 + - image: gcr.io/google_containers/addon-resizer:2.0 name: heapster-nanny resources: limits: @@ -102,12 +102,10 @@ spec: - --extra-cpu={{ metrics_cpu_per_node }}m - --memory={{ base_metrics_memory }} - --extra-memory={{metrics_memory_per_node}}Mi - - --threshold=5 - - --deployment=heapster-v1.4.0-beta.0 + - --deployment=heapster-v1.4.0 - --container=heapster - --poll-period=300000 - - --estimator=exponential - - image: gcr.io/google_containers/addon-resizer:1.7 + - image: gcr.io/google_containers/addon-resizer:2.0 name: eventer-nanny resources: limits: @@ -131,11 +129,9 @@ spec: - --extra-cpu=0m - --memory={{base_eventer_memory}} - --extra-memory={{eventer_memory_per_node}}Ki - - --threshold=5 - - --deployment=heapster-v1.4.0-beta.0 + - --deployment=heapster-v1.4.0 - --container=eventer - --poll-period=300000 - - --estimator=exponential volumes: - name: ssl-certs hostPath: diff --git a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml index 67535c4d4b1..82e11998ed2 100644 --- a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml +++ 
b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml @@ -23,29 +23,29 @@ metadata: apiVersion: extensions/v1beta1 kind: Deployment metadata: - name: heapster-v1.4.0-beta.0 + name: heapster-v1.4.0 namespace: kube-system labels: k8s-app: heapster kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v1.4.0-beta.0 + version: v1.4.0 spec: replicas: 1 selector: matchLabels: k8s-app: heapster - version: v1.4.0-beta.0 + version: v1.4.0 template: metadata: labels: k8s-app: heapster - version: v1.4.0-beta.0 + version: v1.4.0 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/heapster-amd64:v1.4.0-beta.0 + - image: gcr.io/google_containers/heapster-amd64:v1.4.0 name: heapster livenessProbe: httpGet: @@ -66,7 +66,7 @@ spec: - name: usr-ca-certs mountPath: /usr/share/ca-certificates readOnly: true - - image: gcr.io/google_containers/heapster-amd64:v1.4.0-beta.0 + - image: gcr.io/google_containers/heapster-amd64:v1.4.0 name: eventer command: - /eventer @@ -79,7 +79,7 @@ spec: - name: usr-ca-certs mountPath: /usr/share/ca-certificates readOnly: true - - image: gcr.io/google_containers/addon-resizer:1.7 + - image: gcr.io/google_containers/addon-resizer:2.0 name: heapster-nanny resources: limits: @@ -103,12 +103,10 @@ spec: - --extra-cpu={{ metrics_cpu_per_node }}m - --memory={{ base_metrics_memory }} - --extra-memory={{ metrics_memory_per_node }}Mi - - --threshold=5 - - --deployment=heapster-v1.4.0-beta.0 + - --deployment=heapster-v1.4.0 - --container=heapster - --poll-period=300000 - - --estimator=exponential - - image: gcr.io/google_containers/addon-resizer:1.7 + - image: gcr.io/google_containers/addon-resizer:2.0 name: eventer-nanny resources: limits: @@ -132,11 +130,9 @@ spec: - --extra-cpu=0m - --memory={{ base_eventer_memory }} - --extra-memory={{ eventer_memory_per_node }}Ki - - --threshold=5 - - --deployment=heapster-v1.4.0-beta.0 + - 
--deployment=heapster-v1.4.0 - --container=eventer - --poll-period=300000 - - --estimator=exponential volumes: - name: ssl-certs hostPath: diff --git a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml index 0b5ac12087a..0b27abbc2ee 100644 --- a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml @@ -23,29 +23,29 @@ metadata: apiVersion: extensions/v1beta1 kind: Deployment metadata: - name: heapster-v1.4.0-beta.0 + name: heapster-v1.4.0 namespace: kube-system labels: k8s-app: heapster kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v1.4.0-beta.0 + version: v1.4.0 spec: replicas: 1 selector: matchLabels: k8s-app: heapster - version: v1.4.0-beta.0 + version: v1.4.0 template: metadata: labels: k8s-app: heapster - version: v1.4.0-beta.0 + version: v1.4.0 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/heapster-amd64:v1.4.0-beta.0 + - image: gcr.io/google_containers/heapster-amd64:v1.4.0 name: heapster livenessProbe: httpGet: @@ -58,13 +58,13 @@ spec: - /heapster - --source=kubernetes.summary_api:'' - --sink=influxdb:http://monitoring-influxdb:8086 - - image: gcr.io/google_containers/heapster-amd64:v1.4.0-beta.0 + - image: gcr.io/google_containers/heapster-amd64:v1.4.0 name: eventer command: - /eventer - --source=kubernetes:'' - --sink=influxdb:http://monitoring-influxdb:8086 - - image: gcr.io/google_containers/addon-resizer:1.7 + - image: gcr.io/google_containers/addon-resizer:2.0 name: heapster-nanny resources: limits: @@ -88,12 +88,10 @@ spec: - --extra-cpu={{ metrics_cpu_per_node }}m - --memory={{ base_metrics_memory }} - --extra-memory={{ metrics_memory_per_node }}Mi - - --threshold=5 - - --deployment=heapster-v1.4.0-beta.0 + - --deployment=heapster-v1.4.0 - --container=heapster - 
--poll-period=300000 - - --estimator=exponential - - image: gcr.io/google_containers/addon-resizer:1.7 + - image: gcr.io/google_containers/addon-resizer:2.0 name: eventer-nanny resources: limits: @@ -117,11 +115,9 @@ spec: - --extra-cpu=0m - --memory={{ base_eventer_memory }} - --extra-memory={{ eventer_memory_per_node }}Ki - - --threshold=5 - - --deployment=heapster-v1.4.0-beta.0 + - --deployment=heapster-v1.4.0 - --container=eventer - --poll-period=300000 - - --estimator=exponential serviceAccountName: heapster tolerations: - key: "CriticalAddonsOnly" diff --git a/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml b/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml index b41f1024d17..c34f5ef1afa 100644 --- a/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml @@ -21,29 +21,29 @@ metadata: apiVersion: extensions/v1beta1 kind: Deployment metadata: - name: heapster-v1.4.0-beta.0 + name: heapster-v1.4.0 namespace: kube-system labels: k8s-app: heapster kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v1.4.0-beta.0 + version: v1.4.0 spec: replicas: 1 selector: matchLabels: k8s-app: heapster - version: v1.4.0-beta.0 + version: v1.4.0 template: metadata: labels: k8s-app: heapster - version: v1.4.0-beta.0 + version: v1.4.0 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/heapster-amd64:v1.4.0-beta.0 + - image: gcr.io/google_containers/heapster-amd64:v1.4.0 name: heapster livenessProbe: httpGet: @@ -55,7 +55,7 @@ spec: command: - /heapster - --source=kubernetes.summary_api:'' - - --sink=stackdriver + - --sink=stackdriver:?cluster_name={{ cluster_name }} # TODO: add --disable_export when it's merged into Heapster release volumeMounts: - name: ssl-certs @@ -64,7 +64,7 @@ spec: - name: usr-ca-certs mountPath: /usr/share/ca-certificates 
readOnly: true - - image: gcr.io/google_containers/addon-resizer:1.7 + - image: gcr.io/google_containers/addon-resizer:2.0 name: heapster-nanny resources: limits: @@ -88,11 +88,9 @@ spec: - --extra-cpu={{ metrics_cpu_per_node }}m - --memory={{ base_metrics_memory }} - --extra-memory={{metrics_memory_per_node}}Mi - - --threshold=5 - - --deployment=heapster-v1.4.0-beta.0 + - --deployment=heapster-v1.4.0 - --container=heapster - --poll-period=300000 - - --estimator=exponential volumes: - name: ssl-certs hostPath: diff --git a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml index 751d67bb047..aa329ccb44a 100644 --- a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml @@ -21,29 +21,29 @@ metadata: apiVersion: extensions/v1beta1 kind: Deployment metadata: - name: heapster-v1.4.0-beta.0 + name: heapster-v1.4.0 namespace: kube-system labels: k8s-app: heapster kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v1.4.0-beta.0 + version: v1.4.0 spec: replicas: 1 selector: matchLabels: k8s-app: heapster - version: v1.4.0-beta.0 + version: v1.4.0 template: metadata: labels: k8s-app: heapster - version: v1.4.0-beta.0 + version: v1.4.0 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/heapster-amd64:v1.4.0-beta.0 + - image: gcr.io/google_containers/heapster-amd64:v1.4.0 name: heapster livenessProbe: httpGet: @@ -55,7 +55,7 @@ spec: command: - /heapster - --source=kubernetes.summary_api:'' - - image: gcr.io/google_containers/addon-resizer:1.7 + - image: gcr.io/google_containers/addon-resizer:2.0 name: heapster-nanny resources: limits: @@ -79,11 +79,9 @@ spec: - --extra-cpu={{ metrics_cpu_per_node }}m - --memory={{ base_metrics_memory }} - --extra-memory={{ metrics_memory_per_node }}Mi - - 
--threshold=5 - - --deployment=heapster-v1.4.0-beta.0 + - --deployment=heapster-v1.4.0 - --container=heapster - --poll-period=300000 - - --estimator=exponential serviceAccountName: heapster tolerations: - key: "CriticalAddonsOnly" diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml index 5d971154827..29f0431df6e 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml @@ -1,20 +1,20 @@ apiVersion: extensions/v1beta1 kind: DaemonSet metadata: - name: fluentd-es-v1.22 + name: fluentd-es-v1.24 namespace: kube-system labels: k8s-app: fluentd-es kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v1.22 + version: v1.24 spec: template: metadata: labels: k8s-app: fluentd-es kubernetes.io/cluster-service: "true" - version: v1.22 + version: v1.24 # This annotation ensures that fluentd does not get evicted if the node # supports critical pod annotation based priority scheme. # Note that this does not guarantee admission on the nodes (#40573). @@ -24,7 +24,7 @@ spec: serviceAccountName: fluentd-es containers: - name: fluentd-es - image: gcr.io/google_containers/fluentd-elasticsearch:1.23 + image: gcr.io/google_containers/fluentd-elasticsearch:1.24 command: - '/bin/sh' - '-c' diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile index 9747b5c504d..d1747deab43 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile @@ -16,7 +16,7 @@ PREFIX = gcr.io/google_containers IMAGE = fluentd-elasticsearch -TAG = 1.23 +TAG = 1.24 build: docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) . 
diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/build.sh b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/build.sh index d0d9bbe6517..924b7c83a4d 100755 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/build.sh +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/build.sh @@ -32,6 +32,7 @@ sed -i -e "s/USER=td-agent/USER=root/" -e "s/GROUP=td-agent/GROUP=root/" /etc/in # http://docs.fluentd.org/articles/plugin-management td-agent-gem install --no-document fluent-plugin-kubernetes_metadata_filter -v 0.27.0 td-agent-gem install --no-document fluent-plugin-elasticsearch -v 1.9.5 +td-agent-gem install --no-document fluent-plugin-prometheus -v 0.3.0 # Remove docs and postgres references rm -rf /opt/td-agent/embedded/share/doc \ diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/td-agent.conf b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/td-agent.conf index 0f4c557916b..0b42639f752 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/td-agent.conf +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/td-agent.conf @@ -283,6 +283,44 @@ type kubernetes_metadata +# Prometheus Exporter Plugin +# input plugin that exports metrics + + type prometheus + + + + type monitor_agent + + + + type forward + + +# input plugin that collects metrics from MonitorAgent + + @type prometheus_monitor + + host ${hostname} + + + +# input plugin that collects metrics for output plugin + + @type prometheus_output_monitor + + host ${hostname} + + + +# input plugin that collects metrics for in_tail plugin + + @type prometheus_tail_monitor + + host ${hostname} + + + type elasticsearch log_level info diff --git a/cluster/addons/fluentd-gcp/event-exporter.yaml b/cluster/addons/fluentd-gcp/event-exporter.yaml index e3953d65d49..a76691df219 100644 --- a/cluster/addons/fluentd-gcp/event-exporter.yaml +++ b/cluster/addons/fluentd-gcp/event-exporter.yaml @@ -46,7 +46,7 @@ spec: containers: # TODO: Add resources in 1.8 
- name: event-exporter - image: gcr.io/google-containers/event-exporter:v0.1.0-r2 + image: gcr.io/google-containers/event-exporter:v0.1.4 command: - '/event-exporter' - name: prometheus-to-sd-exporter diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml index a1d7a5d1abf..be77defb781 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml @@ -70,27 +70,14 @@ data: # Detect exceptions in the log output and forward them as one log entry. - @type copy + @type detect_exceptions - - @type prometheus - - - type counter - name logging_line_count - desc Total number of lines generated by application containers - - - - @type detect_exceptions - - remove_tag_prefix raw - message log - stream stream - multiline_flush_interval 5 - max_bytes 500000 - max_lines 1000 - + remove_tag_prefix raw + message log + stream stream + multiline_flush_interval 5 + max_bytes 500000 + max_lines 1000 system.input.conf: |- # Example: @@ -291,6 +278,14 @@ data: read_from_head true tag kubelet + + + type systemd + filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }] + pos_file /var/log/gcp-journald-node-problem-detector.pos + read_from_head true + tag node-problem-detector + monitoring.conf: |- # Prometheus monitoring @@ -342,77 +337,50 @@ data: # compute.googleapis.com service rather than container.googleapis.com to keep # them separate since most users don't care about the node logs. 
- @type copy + @type google_cloud - - @type google_cloud - - # Set the buffer type to file to improve the reliability and reduce the memory consumption - buffer_type file - buffer_path /var/log/fluentd-buffers/kubernetes.containers.buffer - # Set queue_full action to block because we want to pause gracefully - # in case of the off-the-limits load instead of throwing an exception - buffer_queue_full_action block - # Set the chunk limit conservatively to avoid exceeding the GCL limit - # of 10MiB per write request. - buffer_chunk_limit 2M - # Cap the combined memory usage of this buffer and the one below to - # 2MiB/chunk * (6 + 2) chunks = 16 MiB - buffer_queue_limit 6 - # Never wait more than 5 seconds before flushing logs in the non-error case. - flush_interval 5s - # Never wait longer than 30 seconds between retries. - max_retry_wait 30 - # Disable the limit on the number of retries (retry forever). - disable_retry_limit - # Use multiple threads for processing. - num_threads 2 - - - @type prometheus - - - type counter - name logging_entry_count - desc Total number of log entries generated by either application containers or system components - - component container - - - + # Collect metrics in Prometheus registry about plugin activity. + enable_monitoring true + monitoring_type prometheus + # Set the buffer type to file to improve the reliability and reduce the memory consumption + buffer_type file + buffer_path /var/log/fluentd-buffers/kubernetes.containers.buffer + # Set queue_full action to block because we want to pause gracefully + # in case of the off-the-limits load instead of throwing an exception + buffer_queue_full_action block + # Set the chunk limit conservatively to avoid exceeding the GCL limit + # of 10MiB per write request. 
+ buffer_chunk_limit 2M + # Cap the combined memory usage of this buffer and the one below to + # 2MiB/chunk * (6 + 2) chunks = 16 MiB + buffer_queue_limit 6 + # Never wait more than 5 seconds before flushing logs in the non-error case. + flush_interval 5s + # Never wait longer than 30 seconds between retries. + max_retry_wait 30 + # Disable the limit on the number of retries (retry forever). + disable_retry_limit + # Use multiple threads for processing. + num_threads 2 # Keep a smaller buffer here since these logs are less important than the user's # container logs. - @type copy + @type google_cloud - - @type google_cloud - - detect_subservice false - buffer_type file - buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer - buffer_queue_full_action block - buffer_chunk_limit 2M - buffer_queue_limit 2 - flush_interval 5s - max_retry_wait 30 - disable_retry_limit - num_threads 2 - - - @type prometheus - - - type counter - name logging_entry_count - desc Total number of log entries generated by either application containers or system components - - component system - - - + enable_monitoring true + monitoring_type prometheus + detect_subservice false + buffer_type file + buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer + buffer_queue_full_action block + buffer_chunk_limit 2M + buffer_queue_limit 2 + flush_interval 5s + max_retry_wait 30 + disable_retry_limit + num_threads 2 metadata: name: fluentd-gcp-config-v1.1 diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml index ba7bc37676c..c4304f5c156 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml @@ -27,7 +27,7 @@ spec: hostNetwork: true containers: - name: fluentd-gcp - image: gcr.io/google-containers/fluentd-gcp:2.0.7 + image: gcr.io/google-containers/fluentd-gcp:2.0.8 # If fluentd consumes its own logs, the following situation may happen: # fluentd fails to send a chunk to the 
server => writes it to the log => # tries to send this message to the server => fails to send a chunk and so on. @@ -90,13 +90,13 @@ spec: exit 1; fi; - name: prometheus-to-sd-exporter - image: gcr.io/google-containers/prometheus-to-sd:v0.1.0 + image: gcr.io/google-containers/prometheus-to-sd:v0.1.3 command: - /monitor - --component=fluentd - --target-port=31337 - --stackdriver-prefix=container.googleapis.com/internal/addons - - --whitelisted-metrics=logging_line_count,logging_entry_count + - --whitelisted-metrics=stackdriver_successful_requests_count,stackdriver_failed_requests_count,stackdriver_ingested_entries_count,stackdriver_dropped_entries_count volumeMounts: - name: ssl-certs mountPath: /etc/ssl/certs @@ -107,6 +107,9 @@ spec: effect: "NoSchedule" - operator: "Exists" effect: "NoExecute" + #TODO: remove this toleration once #44445 is properly fixed. + - operator: "Exists" + effect: "NoSchedule" terminationGracePeriodSeconds: 30 volumes: - name: varlog diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-image/README.md b/cluster/addons/fluentd-gcp/fluentd-gcp-image/README.md index f4caa66c8ee..468c0344876 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-image/README.md +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-image/README.md @@ -1,6 +1,6 @@ # Collecting Docker Log Files with Fluentd and sending to GCP. -The image was moved to the the +The image was moved to the [new location](https://github.com/kubernetes/contrib/tree/master/fluentd/fluentd-gcp-image). 
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/fluentd-gcp/fluentd-gcp-image/README.md?pixel)]() diff --git a/cluster/addons/node-problem-detector/npd.yaml b/cluster/addons/node-problem-detector/npd.yaml index a075a561f56..0790f6cccb5 100644 --- a/cluster/addons/node-problem-detector/npd.yaml +++ b/cluster/addons/node-problem-detector/npd.yaml @@ -80,3 +80,5 @@ spec: tolerations: - operator: "Exists" effect: "NoExecute" + - key: "CriticalAddonsOnly" + operator: "Exists" diff --git a/cluster/common.sh b/cluster/common.sh index 0bcc69685f0..476f2c940d7 100755 --- a/cluster/common.sh +++ b/cluster/common.sh @@ -605,6 +605,7 @@ function build-kube-env { rm -f ${file} cat >$file <>$file <>$file <>$file </etc/gce.conf [global] EOF + if [[ -n "${GCE_API_ENDPOINT:-}" ]]; then + cat <>/etc/gce.conf +api-endpoint = ${GCE_API_ENDPOINT} +EOF + fi if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then use_cloud_config="true" cat <>/etc/gce.conf @@ -232,6 +237,11 @@ token-body = ${TOKEN_BODY} project-id = ${PROJECT_ID} network-name = ${NODE_NETWORK} EOF + if [[ -n "${NETWORK_PROJECT_ID:-}" ]]; then + cat <>/etc/gce.conf +network-project-id = ${NETWORK_PROJECT_ID} +EOF + fi if [[ -n "${NODE_SUBNETWORK:-}" ]]; then cat <>/etc/gce.conf subnetwork-name = ${NODE_SUBNETWORK} @@ -983,7 +993,16 @@ function start-kube-apiserver { local container_env="" if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then - container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}]," + container_env="\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"" + fi + if [[ -n "${ENABLE_PATCH_CONVERSION_DETECTOR:-}" ]]; then + if [[ -n "${container_env}" ]]; then + container_env="${container_env}, " + fi + container_env="\"name\": \"KUBE_PATCH_CONVERSION_DETECTOR\", \"value\": \"${ENABLE_PATCH_CONVERSION_DETECTOR}\"" + 
fi + if [[ -n "${container_env}" ]]; then + container_env="\"env\":[{${container_env}}]," fi src_file="${src_dir}/kube-apiserver.manifest" @@ -1168,6 +1187,8 @@ function setup-addon-manifests { } # Prepares the manifests of k8s addons, and starts the addon manager. +# Vars assumed: +# CLUSTER_NAME function start-kube-addons { echo "Prepare kube-addons manifests and start kube addon manager" local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty" @@ -1205,6 +1226,7 @@ function start-kube-addons { controller_yaml="${controller_yaml}/heapster-controller.yaml" fi remove-salt-config-comments "${controller_yaml}" + sed -i -e "s@{{ cluster_name }}@${CLUSTER_NAME}@g" "${controller_yaml}" sed -i -e "s@{{ *base_metrics_memory *}}@${base_metrics_memory}@g" "${controller_yaml}" sed -i -e "s@{{ *base_metrics_cpu *}}@${base_metrics_cpu}@g" "${controller_yaml}" sed -i -e "s@{{ *base_eventer_memory *}}@${base_eventer_memory}@g" "${controller_yaml}" diff --git a/cluster/gce/container-linux/node-helper.sh b/cluster/gce/container-linux/node-helper.sh index 784d36af05a..c2432c5b0ea 100755 --- a/cluster/gce/container-linux/node-helper.sh +++ b/cluster/gce/container-linux/node-helper.sh @@ -17,14 +17,19 @@ # A library of helper functions and constant for the Container Linux distro. source "${KUBE_ROOT}/cluster/gce/container-linux/helper.sh" +function get-node-instance-metadata { + local metadata="" + metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml," + metadata+="user-data=${KUBE_ROOT}/cluster/gce/container-linux/node.yaml," + metadata+="configure-sh=${KUBE_ROOT}/cluster/gce/container-linux/configure.sh," + metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt" + echo "${metadata}" +} + # $1: template name (required). 
function create-node-instance-template { local template_name="$1" - create-node-template "$template_name" "${scope_flags[*]}" \ - "kube-env=${KUBE_TEMP}/node-kube-env.yaml" \ - "user-data=${KUBE_ROOT}/cluster/gce/container-linux/node.yaml" \ - "configure-sh=${KUBE_ROOT}/cluster/gce/container-linux/configure.sh" \ - "cluster-name=${KUBE_TEMP}/cluster-name.txt" + create-node-template "$template_name" "${scope_flags[*]}" "$(get-node-instance-metadata)" # TODO(euank): We should include update-strategy here. We should also switch to ignition } diff --git a/cluster/gce/debian/node-helper.sh b/cluster/gce/debian/node-helper.sh index 58c1a04562b..b62930f0e34 100755 --- a/cluster/gce/debian/node-helper.sh +++ b/cluster/gce/debian/node-helper.sh @@ -16,12 +16,17 @@ # A library of helper functions and constant for debian os distro +function get-node-instance-metadata { + local metadata="" + metadata+="startup-script=${KUBE_TEMP}/configure-vm.sh," + metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml," + metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt" + echo "${metadata}" +} + # $1: template name (required) function create-node-instance-template { local template_name="$1" prepare-startup-script - create-node-template "$template_name" "${scope_flags}" \ - "startup-script=${KUBE_TEMP}/configure-vm.sh" \ - "kube-env=${KUBE_TEMP}/node-kube-env.yaml" \ - "cluster-name=${KUBE_TEMP}/cluster-name.txt" + create-node-template "$template_name" "${scope_flags}" "$(get-node-instance-metadata)" } diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index 8339b1224ad..165c1996cd5 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -379,6 +379,11 @@ function create-master-auth { cat </etc/gce.conf [global] EOF + if [[ -n "${GCE_API_ENDPOINT:-}" ]]; then + cat <>/etc/gce.conf +api-endpoint = ${GCE_API_ENDPOINT} +EOF + fi if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n 
"${NODE_NETWORK:-}" ]]; then use_cloud_config="true" cat <>/etc/gce.conf @@ -387,6 +392,11 @@ token-body = ${TOKEN_BODY} project-id = ${PROJECT_ID} network-name = ${NODE_NETWORK} EOF + if [[ -n "${NETWORK_PROJECT_ID:-}" ]]; then + cat <>/etc/gce.conf +network-project-id = ${NETWORK_PROJECT_ID} +EOF + fi if [[ -n "${NODE_SUBNETWORK:-}" ]]; then cat <>/etc/gce.conf subnetwork-name = ${NODE_SUBNETWORK} @@ -912,7 +922,11 @@ function start-kubelet { flags+=" --cni-bin-dir=/home/kubernetes/bin" if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then # Calico uses CNI always. - flags+=" --network-plugin=cni" + if [[ "${KUBERNETES_PRIVATE_MASTER:-}" == "true" ]]; then + flags+=" --network-plugin=${NETWORK_PROVIDER}" + else + flags+=" --network-plugin=cni" + fi else # Otherwise use the configured value. flags+=" --network-plugin=${NETWORK_PROVIDER}" @@ -1182,6 +1196,7 @@ function prepare-mounter-rootfs { mount --make-rshared "${CONTAINERIZED_MOUNTER_ROOTFS}/var/lib/kubelet" mount --bind -o ro /proc "${CONTAINERIZED_MOUNTER_ROOTFS}/proc" mount --bind -o ro /dev "${CONTAINERIZED_MOUNTER_ROOTFS}/dev" + mount --bind -o ro /etc/resolv.conf "${CONTAINERIZED_MOUNTER_ROOTFS}/etc/resolv.conf" } # A helper function for removing salt configuration and comments from a file. 
@@ -1398,7 +1413,16 @@ function start-kube-apiserver { local container_env="" if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then - container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}]," + container_env="\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"" + fi + if [[ -n "${ENABLE_PATCH_CONVERSION_DETECTOR:-}" ]]; then + if [[ -n "${container_env}" ]]; then + container_env="${container_env}, " + fi + container_env="\"name\": \"KUBE_PATCH_CONVERSION_DETECTOR\", \"value\": \"${ENABLE_PATCH_CONVERSION_DETECTOR}\"" + fi + if [[ -n "${container_env}" ]]; then + container_env="\"env\":[{${container_env}}]," fi if [[ -n "${ENCRYPTION_PROVIDER_CONFIG:-}" ]]; then @@ -1597,6 +1621,8 @@ function setup-addon-manifests { } # Prepares the manifests of k8s addons, and starts the addon manager. +# Vars assumed: +# CLUSTER_NAME function start-kube-addons { echo "Prepare kube-addons manifests and start kube addon manager" local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty" @@ -1634,6 +1660,7 @@ function start-kube-addons { controller_yaml="${controller_yaml}/heapster-controller.yaml" fi remove-salt-config-comments "${controller_yaml}" + sed -i -e "s@{{ cluster_name }}@${CLUSTER_NAME}@g" "${controller_yaml}" sed -i -e "s@{{ *base_metrics_memory *}}@${base_metrics_memory}@g" "${controller_yaml}" sed -i -e "s@{{ *base_metrics_cpu *}}@${base_metrics_cpu}@g" "${controller_yaml}" sed -i -e "s@{{ *base_eventer_memory *}}@${base_eventer_memory}@g" "${controller_yaml}" @@ -1701,7 +1728,7 @@ function start-kube-addons { sed -i -e "s@__CALICO_TYPHA_CPU__@$(get-calico-typha-cpu)@g" "${typha_dep_file}" sed -i -e "s@__CALICO_TYPHA_REPLICAS__@$(get-calico-typha-replicas)@g" "${typha_dep_file}" else - # If not configured to use Calico, the set the typha replica count to 0, but only if the + # If not configured to use Calico, the set the typha replica count to 0, but 
only if the # addon is present. local -r typha_dep_file="${dst_dir}/calico-policy-controller/typha-deployment.yaml" if [[ -e $typha_dep_file ]]; then diff --git a/cluster/gce/gci/node-helper.sh b/cluster/gce/gci/node-helper.sh index 3217b4e09da..45cdfe6cf1d 100755 --- a/cluster/gce/gci/node-helper.sh +++ b/cluster/gce/gci/node-helper.sh @@ -17,16 +17,21 @@ # A library of helper functions and constant for GCI distro source "${KUBE_ROOT}/cluster/gce/gci/helper.sh" +function get-node-instance-metadata { + local metadata="" + metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml," + metadata+="user-data=${KUBE_ROOT}/cluster/gce/gci/node.yaml," + metadata+="configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh," + metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt," + metadata+="gci-update-strategy=${KUBE_TEMP}/gci-update.txt," + metadata+="gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt," + metadata+="gci-docker-version=${KUBE_TEMP}/gci-docker-version.txt" + echo "${metadata}" +} + # $1: template name (required). function create-node-instance-template { local template_name="$1" ensure-gci-metadata-files - create-node-template "$template_name" "${scope_flags[*]}" \ - "kube-env=${KUBE_TEMP}/node-kube-env.yaml" \ - "user-data=${KUBE_ROOT}/cluster/gce/gci/node.yaml" \ - "configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh" \ - "cluster-name=${KUBE_TEMP}/cluster-name.txt" \ - "gci-update-strategy=${KUBE_TEMP}/gci-update.txt" \ - "gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt" \ - "gci-docker-version=${KUBE_TEMP}/gci-docker-version.txt" + create-node-template "$template_name" "${scope_flags[*]}" "$(get-node-instance-metadata)" } diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 3c281f60eeb..ed509660fcf 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -345,6 +345,11 @@ function detect-node-names() { --format='value(instance)')) done fi + # Add heapster node name to the list too (if it exists). 
+ if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then + NODE_NAMES+=("${NODE_INSTANCE_PREFIX}-heapster") + fi + echo "INSTANCE_GROUPS=${INSTANCE_GROUPS[*]:-}" >&2 echo "NODE_NAMES=${NODE_NAMES[*]:-}" >&2 } @@ -510,7 +515,7 @@ function make-gcloud-network-argument() { ret="${ret},aliases=pods-default:${alias_size}" ret="${ret} --no-can-ip-forward" else - if [[ ${PREEXISTING_NETWORK} = "true" && "${PREEXISTING_NETWORK_MODE}" != "custom" ]]; then + if [[ ${ENABLE_BIG_CLUSTER_SUBNETS} != "true" || (${PREEXISTING_NETWORK} = "true" && "${PREEXISTING_NETWORK_MODE}" != "custom") ]]; then ret="--network ${network}" else ret="--subnet=${network}" @@ -533,7 +538,7 @@ function get-template-name-from-version() { # Robustly try to create an instance template. # $1: The name of the instance template. # $2: The scopes flag. -# $3 and others: Metadata entries (must all be from a file). +# $3: String of comma-separated metadata entries (must all be from a file). function create-node-template() { detect-project local template_name="$1" @@ -600,7 +605,7 @@ function create-node-template() { ${network} \ ${preemptible_minions} \ $2 \ - --metadata-from-file $(echo ${@:3} | tr ' ' ',') >&2; then + --metadata-from-file $3 >&2; then if (( attempt > 5 )); then echo -e "${color_red}Failed to create instance template $template_name ${color_norm}" >&2 exit 2 @@ -1237,21 +1242,24 @@ function create-nodes-firewall() { } } -function create-nodes-template() { - echo "Creating minions." - - # TODO(zmerlynn): Refactor setting scope flags. +function get-scope-flags() { local scope_flags= if [[ -n "${NODE_SCOPES}" ]]; then scope_flags="--scopes ${NODE_SCOPES}" else scope_flags="--no-scopes" fi + echo "${scope_flags}" +} + +function create-nodes-template() { + echo "Creating nodes." 
+ + local scope_flags=$(get-scope-flags) write-node-env local template_name="${NODE_INSTANCE_PREFIX}-template" - create-node-instance-template $template_name } @@ -1279,7 +1287,13 @@ function set_num_migs() { function create-nodes() { local template_name="${NODE_INSTANCE_PREFIX}-template" - local instances_left=${NUM_NODES} + if [[ -z "${HEAPSTER_MACHINE_TYPE:-}" ]]; then + local -r nodes="${NUM_NODES}" + else + local -r nodes=$(( NUM_NODES - 1 )) + fi + + local instances_left=${nodes} #TODO: parallelize this loop to speed up the process for ((i=1; i<=${NUM_MIGS}; i++)); do @@ -1305,6 +1319,47 @@ function create-nodes() { --zone "${ZONE}" \ --project "${PROJECT}" || true; done + + if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then + echo "Creating a special node for heapster with machine-type ${HEAPSTER_MACHINE_TYPE}" + create-heapster-node + fi +} + +# Assumes: +# - NODE_INSTANCE_PREFIX +# - PROJECT +# - ZONE +# - HEAPSTER_MACHINE_TYPE +# - NODE_DISK_TYPE +# - NODE_DISK_SIZE +# - NODE_IMAGE_PROJECT +# - NODE_IMAGE +# - NODE_TAG +# - NETWORK +# - ENABLE_IP_ALIASES +# - IP_ALIAS_SUBNETWORK +# - IP_ALIAS_SIZE +function create-heapster-node() { + local network=$(make-gcloud-network-argument \ + "${NETWORK}" "" \ + "${ENABLE_IP_ALIASES:-}" \ + "${IP_ALIAS_SUBNETWORK:-}" \ + "${IP_ALIAS_SIZE:-}") + + gcloud compute instances \ + create "${NODE_INSTANCE_PREFIX}-heapster" \ + --project "${PROJECT}" \ + --zone "${ZONE}" \ + --machine-type="${HEAPSTER_MACHINE_TYPE}" \ + --boot-disk-type "${NODE_DISK_TYPE}" \ + --boot-disk-size "${NODE_DISK_SIZE}" \ + --image-project="${NODE_IMAGE_PROJECT}" \ + --image "${NODE_IMAGE}" \ + --tags "${NODE_TAG}" \ + ${network} \ + $(get-scope-flags) \ + --metadata-from-file "$(get-node-instance-metadata)" } # Assumes: @@ -1505,6 +1560,20 @@ function kube-down() { "${template}" fi done + + # Delete the special heapster node (if it exists). 
+ if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then + local -r heapster_machine_name="${NODE_INSTANCE_PREFIX}-heapster" + if gcloud compute instances describe "${heapster_machine_name}" --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then + # Now we can safely delete the VM. + gcloud compute instances delete \ + --project "${PROJECT}" \ + --quiet \ + --delete-disks all \ + --zone "${ZONE}" \ + "${heapster_machine_name}" + fi + fi fi local -r REPLICA_NAME="${KUBE_REPLICA_NAME:-$(get-replica-name)}" @@ -1875,13 +1944,7 @@ function prepare-push() { if [[ "${node}" == "true" ]]; then write-node-env - # TODO(zmerlynn): Refactor setting scope flags. - local scope_flags= - if [[ -n "${NODE_SCOPES}" ]]; then - scope_flags="--scopes ${NODE_SCOPES}" - else - scope_flags="--no-scopes" - fi + local scope_flags=$(get-scope-flags) # Ugly hack: Since it is not possible to delete instance-template that is currently # being used, create a temp one, then delete the old one and recreate it once again. diff --git a/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py b/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py index af46cfbadf8..543a4fa6fd5 100644 --- a/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py +++ b/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py @@ -29,6 +29,7 @@ from charms.layer import nginx from subprocess import Popen from subprocess import PIPE from subprocess import STDOUT +from subprocess import CalledProcessError @when('certificates.available') @@ -49,6 +50,16 @@ def request_server_certificates(tls): tls.request_server_cert(common_name, sans, certificate_name) +@when('config.changed.port') +def close_old_port(): + config = hookenv.config() + old_port = config.previous('port') + try: + hookenv.close_port(old_port) + except CalledProcessError: + hookenv.log('Port %d already closed, skipping.' 
% old_port) + + @when('nginx.available', 'apiserver.available', 'certificates.server.cert.available') def install_load_balancer(apiserver, tls): @@ -63,20 +74,23 @@ def install_load_balancer(apiserver, tls): if cert_exists and key_exists: # At this point the cert and key exist, and they are owned by root. chown = ['chown', 'www-data:www-data', server_cert_path] + # Change the owner to www-data so the nginx process can read the cert. subprocess.call(chown) chown = ['chown', 'www-data:www-data', server_key_path] + # Change the owner to www-data so the nginx process can read the key. subprocess.call(chown) - hookenv.open_port(hookenv.config('port')) + port = hookenv.config('port') + hookenv.open_port(port) services = apiserver.services() nginx.configure_site( 'apilb', 'apilb.conf', server_name='_', services=services, - port=hookenv.config('port'), + port=port, server_certificate=server_cert_path, server_key=server_key_path, ) diff --git a/cluster/juju/layers/kubeapi-load-balancer/templates/apilb.conf b/cluster/juju/layers/kubeapi-load-balancer/templates/apilb.conf index 0cb18f58e72..6d1b23e25bf 100644 --- a/cluster/juju/layers/kubeapi-load-balancer/templates/apilb.conf +++ b/cluster/juju/layers/kubeapi-load-balancer/templates/apilb.conf @@ -8,7 +8,7 @@ upstream target_service { server { - listen 443 ssl http2; + listen {{ port }} ssl http2; server_name {{ server_name }}; access_log /var/log/nginx.access.log; @@ -33,9 +33,6 @@ server { proxy_set_header Connection $http_connection; proxy_set_header X-Stream-Protocol-Version $http_x_stream_protocol_version; - proxy_ssl_certificate {{ server_certificate }}; - proxy_ssl_certificate_key {{ server_key }}; - add_header X-Stream-Protocol-Version $upstream_http_x_stream_protocol_version; proxy_pass https://target_service; diff --git a/cluster/juju/layers/kubernetes-e2e/README.md b/cluster/juju/layers/kubernetes-e2e/README.md index d60fbd0c2dc..3f516d7fc42 100644 --- a/cluster/juju/layers/kubernetes-e2e/README.md +++ 
b/cluster/juju/layers/kubernetes-e2e/README.md @@ -21,7 +21,8 @@ and then relate the `kubernetes-e2e` charm. ```shell juju deploy kubernetes-core juju deploy cs:~containers/kubernetes-e2e -juju add-relation kubernetes-e2e kubernetes-master +juju add-relation kubernetes-e2e:kube-control kubernetes-master:kube-control +juju add-relation kubernetes-e2e:kubernetes-master kubernetes-master:kube-api-endpoint juju add-relation kubernetes-e2e easyrsa ``` diff --git a/cluster/juju/layers/kubernetes-e2e/layer.yaml b/cluster/juju/layers/kubernetes-e2e/layer.yaml index 06d431b0452..b913883bb0d 100644 --- a/cluster/juju/layers/kubernetes-e2e/layer.yaml +++ b/cluster/juju/layers/kubernetes-e2e/layer.yaml @@ -4,6 +4,7 @@ includes: - layer:tls-client - layer:snap - interface:http + - interface:kube-control options: tls-client: ca_certificate_path: '/srv/kubernetes/ca.crt' diff --git a/cluster/juju/layers/kubernetes-e2e/metadata.yaml b/cluster/juju/layers/kubernetes-e2e/metadata.yaml index a67fd67ee62..9f4690a1e2b 100644 --- a/cluster/juju/layers/kubernetes-e2e/metadata.yaml +++ b/cluster/juju/layers/kubernetes-e2e/metadata.yaml @@ -14,6 +14,8 @@ series: requires: kubernetes-master: interface: http + kube-control: + interface: kube-control resources: kubectl: type: file @@ -23,3 +25,4 @@ resources: type: file filename: kubernetes-test.snap description: kubernetes-test snap + diff --git a/cluster/juju/layers/kubernetes-e2e/reactive/kubernetes_e2e.py b/cluster/juju/layers/kubernetes-e2e/reactive/kubernetes_e2e.py index 1ab6f5f7391..76a97aa0d63 100644 --- a/cluster/juju/layers/kubernetes-e2e/reactive/kubernetes_e2e.py +++ b/cluster/juju/layers/kubernetes-e2e/reactive/kubernetes_e2e.py @@ -38,15 +38,22 @@ def reset_delivery_states(): @when('kubernetes-e2e.installed') +def report_status(): + ''' Report the status of the charm. 
''' + messaging() + + def messaging(): ''' Probe our relations to determine the propper messaging to the end user ''' missing_services = [] if not is_state('kubernetes-master.available'): - missing_services.append('kubernetes-master') + missing_services.append('kubernetes-master:http') if not is_state('certificates.available'): missing_services.append('certificates') + if not is_state('kubeconfig.ready'): + missing_services.append('kubernetes-master:kube-control') if missing_services: if len(missing_services) > 1: @@ -80,16 +87,15 @@ def install_snaps(): @when('tls_client.ca.saved', 'tls_client.client.certificate.saved', 'tls_client.client.key.saved', 'kubernetes-master.available', - 'kubernetes-e2e.installed') + 'kubernetes-e2e.installed', 'kube-control.auth.available') @when_not('kubeconfig.ready') -def prepare_kubeconfig_certificates(master): +def prepare_kubeconfig_certificates(master, kube_control): ''' Prepare the data to feed to create the kubeconfig file. ''' layer_options = layer.options('tls-client') # Get all the paths to the tls information required for kubeconfig. ca = layer_options.get('ca_certificate_path') - key = layer_options.get('client_key_path') - cert = layer_options.get('client_certificate_path') + creds = kube_control.get_auth_credentials() servers = get_kube_api_servers(master) @@ -97,17 +103,28 @@ def prepare_kubeconfig_certificates(master): kubeconfig_path = '/home/ubuntu/.kube/config' # Create kubernetes configuration in the default location for ubuntu. 
- create_kubeconfig('/root/.kube/config', servers[0], ca, key, cert, - user='root') - create_kubeconfig(kubeconfig_path, servers[0], ca, key, cert, - user='ubuntu') + create_kubeconfig('/root/.kube/config', servers[0], ca, + token=creds['client_token'], user='root') + create_kubeconfig(kubeconfig_path, servers[0], ca, + token=creds['client_token'], user='ubuntu') # Set permissions on the ubuntu users kubeconfig to ensure a consistent UX cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path] check_call(cmd) - + messaging() set_state('kubeconfig.ready') +@when('kube-control.connected') +def request_credentials(kube_control): + """ Request authorization creds.""" + + # The kube-cotrol interface is created to support RBAC. + # At this point we might as well do the right thing and return the hostname + # even if it will only be used when we enable RBAC + user = 'system:masters' + kube_control.set_auth_request(user) + + @when('kubernetes-e2e.installed', 'kubeconfig.ready') def set_app_version(): ''' Declare the application version to juju ''' @@ -124,19 +141,40 @@ def set_app_version(): hookenv.application_version_set(version_from.rstrip()) -def create_kubeconfig(kubeconfig, server, ca, key, certificate, user='ubuntu', - context='juju-context', cluster='juju-cluster'): +def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None, + user='ubuntu', context='juju-context', + cluster='juju-cluster', password=None, token=None): '''Create a configuration for Kubernetes based on path using the supplied arguments for values of the Kubernetes server, CA, key, certificate, user context and cluster.''' + if not key and not certificate and not password and not token: + raise ValueError('Missing authentication mechanism.') + + # token and password are mutually exclusive. Error early if both are + # present. The developer has requested an impossible situation. 
+ # see: kubectl config set-credentials --help + if token and password: + raise ValueError('Token and Password are mutually exclusive.') # Create the config file with the address of the master server. cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \ '--server={2} --certificate-authority={3} --embed-certs=true' check_call(split(cmd.format(kubeconfig, cluster, server, ca))) + # Delete old users + cmd = 'kubectl config --kubeconfig={0} unset users' + check_call(split(cmd.format(kubeconfig))) # Create the credentials using the client flags. - cmd = 'kubectl config --kubeconfig={0} set-credentials {1} ' \ - '--client-key={2} --client-certificate={3} --embed-certs=true' - check_call(split(cmd.format(kubeconfig, user, key, certificate))) + cmd = 'kubectl config --kubeconfig={0} ' \ + 'set-credentials {1} '.format(kubeconfig, user) + + if key and certificate: + cmd = '{0} --client-key={1} --client-certificate={2} '\ + '--embed-certs=true'.format(cmd, key, certificate) + if password: + cmd = "{0} --username={1} --password={2}".format(cmd, user, password) + # This is mutually exclusive from password. They will not work together. + if token: + cmd = "{0} --token={1}".format(cmd, token) + check_call(split(cmd)) # Create a default context with the cluster. 
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \ '--cluster={2} --user={3}' diff --git a/cluster/juju/layers/kubernetes-master/actions/namespace-create b/cluster/juju/layers/kubernetes-master/actions/namespace-create index 56a6f5c8e40..63ba89d8e5a 100755 --- a/cluster/juju/layers/kubernetes-master/actions/namespace-create +++ b/cluster/juju/layers/kubernetes-master/actions/namespace-create @@ -1,5 +1,5 @@ #!/usr/bin/env python3 - +import os from yaml import safe_load as load from charmhelpers.core.hookenv import ( action_get, @@ -11,6 +11,9 @@ from charms.templating.jinja2 import render from subprocess import check_output +os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') + + def kubectl(args): cmd = ['kubectl'] + args return check_output(cmd) diff --git a/cluster/juju/layers/kubernetes-master/config.yaml b/cluster/juju/layers/kubernetes-master/config.yaml index 0fde833377a..b58087c24e3 100644 --- a/cluster/juju/layers/kubernetes-master/config.yaml +++ b/cluster/juju/layers/kubernetes-master/config.yaml @@ -26,3 +26,8 @@ options: default: "stable" description: | Snap channel to install Kubernetes master services from + client_password: + type: string + default: "" + description: | + Password to be used for admin user (leave empty for random password). 
diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index 0b729b1a974..fc3196ff293 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -37,7 +37,7 @@ from charms.reactive import remove_state from charms.reactive import set_state from charms.reactive import is_state from charms.reactive import when, when_any, when_not -from charms.reactive.helpers import data_changed +from charms.reactive.helpers import data_changed, any_file_changed from charms.kubernetes.common import get_version from charms.kubernetes.common import retry from charms.kubernetes.flagmanager import FlagManager @@ -45,6 +45,7 @@ from charms.kubernetes.flagmanager import FlagManager from charmhelpers.core import hookenv from charmhelpers.core import host from charmhelpers.core import unitdata +from charmhelpers.core.host import service_stop from charmhelpers.core.templating import render from charmhelpers.fetch import apt_install from charmhelpers.contrib.charmsupport import nrpe @@ -77,8 +78,8 @@ def reset_states_for_delivery(): '''An upgrade charm event was triggered by Juju, react to that here.''' migrate_from_pre_snaps() install_snaps() + set_state('reconfigure.authentication.setup') remove_state('authentication.setup') - remove_state('kubernetes-master.components.started') def rename_file_idempotent(source, destination): @@ -155,6 +156,7 @@ def install_snaps(): hookenv.status_set('maintenance', 'Installing cdk-addons snap') snap.install('cdk-addons', channel=channel) set_state('kubernetes-master.snaps.installed') + remove_state('kubernetes-master.components.started') @when('config.changed.channel') @@ -162,6 +164,22 @@ def channel_changed(): install_snaps() +@when('config.changed.client_password', 'leadership.is_leader') +def password_changed(): + """Handle password change via the charms 
config.""" + password = hookenv.config('client_password') + if password == "" and is_state('client.password.initialised'): + # password_changed is called during an upgrade. Nothing to do. + return + elif password == "": + # Password not initialised + password = token_generator() + setup_basic_auth(password, "admin", "admin") + set_state('reconfigure.authentication.setup') + remove_state('authentication.setup') + set_state('client.password.initialised') + + @when('cni.connected') @when_not('cni.configured') def configure_cni(cni): @@ -187,19 +205,23 @@ def setup_leader_authentication(): keys = [service_key, basic_auth, known_tokens] # Try first to fetch data from an old leadership broadcast. - if not get_keys_from_leader(keys): - if not os.path.isfile(basic_auth): - setup_basic_auth('admin', 'admin', 'admin') + if not get_keys_from_leader(keys) \ + or is_state('reconfigure.authentication.setup'): + last_pass = get_password('basic_auth.csv', 'admin') + setup_basic_auth(last_pass, 'admin', 'admin') + if not os.path.isfile(known_tokens): setup_tokens(None, 'admin', 'admin') setup_tokens(None, 'kubelet', 'kubelet') setup_tokens(None, 'kube_proxy', 'kube_proxy') + # Generate the default service account token key os.makedirs('/root/cdk', exist_ok=True) if not os.path.isfile(service_key): cmd = ['openssl', 'genrsa', '-out', service_key, '2048'] check_call(cmd) + remove_state('reconfigure.authentication.setup') api_opts.add('service-account-key-file', service_key) controller_opts.add('service-account-private-key-file', service_key) @@ -215,36 +237,42 @@ def setup_leader_authentication(): # eg: # {'/root/cdk/serviceaccount.key': 'RSA:2471731...'} charms.leadership.leader_set(leader_data) - + remove_state('kubernetes-master.components.started') set_state('authentication.setup') @when_not('leadership.is_leader') -@when_not('authentication.setup') def setup_non_leader_authentication(): - api_opts = FlagManager('kube-apiserver') - controller_opts = 
FlagManager('kube-controller-manager') service_key = '/root/cdk/serviceaccount.key' basic_auth = '/root/cdk/basic_auth.csv' known_tokens = '/root/cdk/known_tokens.csv' - hookenv.status_set('maintenance', 'Rendering authentication templates.') - keys = [service_key, basic_auth, known_tokens] - if not get_keys_from_leader(keys): + # The source of truth for non-leaders is the leader. + # Therefore we overwrite_local with whatever the leader has. + if not get_keys_from_leader(keys, overwrite_local=True): # the keys were not retrieved. Non-leaders have to retry. return + if not any_file_changed(keys) and is_state('authentication.setup'): + # No change detected and we have already setup the authentication + return + + hookenv.status_set('maintenance', 'Rendering authentication templates.') + api_opts = FlagManager('kube-apiserver') api_opts.add('basic-auth-file', basic_auth) api_opts.add('token-auth-file', known_tokens) api_opts.add('service-account-key-file', service_key) + + controller_opts = FlagManager('kube-controller-manager') controller_opts.add('service-account-private-key-file', service_key) + remove_state('kubernetes-master.components.started') set_state('authentication.setup') -def get_keys_from_leader(keys): +def get_keys_from_leader(keys, overwrite_local=False): """ Gets the broadcasted keys from the leader and stores them in the corresponding files. 
@@ -261,7 +289,7 @@ def get_keys_from_leader(keys): for k in keys: # If the path does not exist, assume we need it - if not os.path.exists(k): + if not os.path.exists(k) or overwrite_local: # Fetch data from leadership broadcast contents = charms.leadership.leader_get(k) # Default to logging the warning and wait for leader data to be set @@ -351,6 +379,22 @@ def send_cluster_dns_detail(kube_control): kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip) +@when('kube-control.auth.requested') +@when('authentication.setup') +@when('leadership.is_leader') +def send_tokens(kube_control): + """Send the tokens to the workers.""" + kubelet_token = get_token('kubelet') + proxy_token = get_token('kube_proxy') + admin_token = get_token('admin') + + # Send the data + requests = kube_control.auth_user() + for request in requests: + kube_control.sign_auth_request(request[0], kubelet_token, + proxy_token, admin_token) + + @when_not('kube-control.connected') def missing_kube_control(): """Inform the operator they need to add the kube-control relation. @@ -448,7 +492,7 @@ def addons_ready(): @when('loadbalancer.available', 'certificates.ca.available', - 'certificates.client.cert.available') + 'certificates.client.cert.available', 'authentication.setup') def loadbalancer_kubeconfig(loadbalancer, ca, client): # Get the potential list of loadbalancers from the relation object. 
hosts = loadbalancer.get_addresses_ports() @@ -460,7 +504,8 @@ def loadbalancer_kubeconfig(loadbalancer, ca, client): build_kubeconfig(server) -@when('certificates.ca.available', 'certificates.client.cert.available') +@when('certificates.ca.available', 'certificates.client.cert.available', + 'authentication.setup') @when_not('loadbalancer.available') def create_self_config(ca, client): '''Create a kubernetes configuration for the master unit.''' @@ -651,6 +696,16 @@ def disable_gpu_mode(): remove_state('kubernetes-master.gpu.enabled') +@hook('stop') +def shutdown(): + """ Stop the kubernetes master services + + """ + service_stop('snap.kube-apiserver.daemon') + service_stop('snap.kube-controller-manager.daemon') + service_stop('snap.kube-scheduler.daemon') + + def arch(): '''Return the package architecture as a string. Raise an exception if the architecture is not supported by kubernetes.''' @@ -669,37 +724,54 @@ def build_kubeconfig(server): # Get all the paths to the tls information required for kubeconfig. ca = layer_options.get('ca_certificate_path') ca_exists = ca and os.path.isfile(ca) - key = layer_options.get('client_key_path') - key_exists = key and os.path.isfile(key) - cert = layer_options.get('client_certificate_path') - cert_exists = cert and os.path.isfile(cert) + client_pass = get_password('basic_auth.csv', 'admin') # Do we have everything we need? - if ca_exists and key_exists and cert_exists: - # Cache last server string to know if we need to regenerate the config. - if not data_changed('kubeconfig.server', server): - return + if ca_exists and client_pass: # Create an absolute path for the kubeconfig file. kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config') # Create the kubeconfig on this system so users can access the cluster. 
- create_kubeconfig(kubeconfig_path, server, ca, key, cert) + + create_kubeconfig(kubeconfig_path, server, ca, + user='admin', password=client_pass) # Make the config file readable by the ubuntu users so juju scp works. cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path] check_call(cmd) -def create_kubeconfig(kubeconfig, server, ca, key, certificate, user='ubuntu', - context='juju-context', cluster='juju-cluster'): +def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None, + user='ubuntu', context='juju-context', + cluster='juju-cluster', password=None, token=None): '''Create a configuration for Kubernetes based on path using the supplied arguments for values of the Kubernetes server, CA, key, certificate, user context and cluster.''' + if not key and not certificate and not password and not token: + raise ValueError('Missing authentication mechanism.') + + # token and password are mutually exclusive. Error early if both are + # present. The developer has requested an impossible situation. + # see: kubectl config set-credentials --help + if token and password: + raise ValueError('Token and Password are mutually exclusive.') # Create the config file with the address of the master server. cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \ '--server={2} --certificate-authority={3} --embed-certs=true' check_call(split(cmd.format(kubeconfig, cluster, server, ca))) + # Delete old users + cmd = 'kubectl config --kubeconfig={0} unset users' + check_call(split(cmd.format(kubeconfig))) # Create the credentials using the client flags. 
- cmd = 'kubectl config --kubeconfig={0} set-credentials {1} ' \ - '--client-key={2} --client-certificate={3} --embed-certs=true' - check_call(split(cmd.format(kubeconfig, user, key, certificate))) + cmd = 'kubectl config --kubeconfig={0} ' \ + 'set-credentials {1} '.format(kubeconfig, user) + + if key and certificate: + cmd = '{0} --client-key={1} --client-certificate={2} '\ + '--embed-certs=true'.format(cmd, key, certificate) + if password: + cmd = "{0} --username={1} --password={2}".format(cmd, user, password) + # This is mutually exclusive from password. They will not work together. + if token: + cmd = "{0} --token={1}".format(cmd, token) + check_call(split(cmd)) # Create a default context with the cluster. cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \ '--cluster={2} --user={3}' @@ -786,7 +858,6 @@ def configure_master_services(): api_opts.add('service-cluster-ip-range', service_cidr()) api_opts.add('min-request-timeout', '300') api_opts.add('v', '4') - api_opts.add('client-ca-file', ca_cert_path) api_opts.add('tls-cert-file', server_cert_path) api_opts.add('tls-private-key-file', server_key_path) api_opts.add('kubelet-certificate-authority', ca_cert_path) @@ -826,6 +897,7 @@ def configure_master_services(): cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ') check_call(cmd) + cmd = ( ['snap', 'set', 'kube-controller-manager'] + controller_opts.to_s().split(' ') @@ -835,14 +907,16 @@ def configure_master_services(): check_call(cmd) -def setup_basic_auth(username='admin', password='admin', user='admin'): +def setup_basic_auth(password=None, username='admin', uid='admin'): '''Create the htacces file and the tokens.''' root_cdk = '/root/cdk' if not os.path.isdir(root_cdk): os.makedirs(root_cdk) htaccess = os.path.join(root_cdk, 'basic_auth.csv') + if not password: + password = token_generator() with open(htaccess, 'w') as stream: - stream.write('{0},{1},{2}'.format(username, password, user)) + 
stream.write('{0},{1},{2}'.format(password, username, uid)) def setup_tokens(token, username, user): @@ -852,12 +926,49 @@ def setup_tokens(token, username, user): os.makedirs(root_cdk) known_tokens = os.path.join(root_cdk, 'known_tokens.csv') if not token: - alpha = string.ascii_letters + string.digits - token = ''.join(random.SystemRandom().choice(alpha) for _ in range(32)) + token = token_generator() with open(known_tokens, 'a') as stream: stream.write('{0},{1},{2}\n'.format(token, username, user)) +def get_password(csv_fname, user): + '''Get the password of user within the csv file provided.''' + root_cdk = '/root/cdk' + tokens_fname = os.path.join(root_cdk, csv_fname) + if not os.path.isfile(tokens_fname): + return None + with open(tokens_fname, 'r') as stream: + for line in stream: + record = line.split(',') + if record[1] == user: + return record[0] + return None + + +def get_token(username): + """Grab a token from the static file if present. """ + return get_password('known_tokens.csv', username) + + +def set_token(password, save_salt): + ''' Store a token so it can be recalled later by token_generator. + + param: password - the password to be stored + param: save_salt - the key to store the value of the token.''' + db = unitdata.kv() + db.set(save_salt, password) + return db.get(save_salt) + + +def token_generator(length=32): + ''' Generate a random token for use in passwords and account tokens. + + param: length - the length of the token to generate''' + alpha = string.ascii_letters + string.digits + token = ''.join(random.SystemRandom().choice(alpha) for _ in range(length)) + return token + + @retry(times=3, delay_secs=10) def all_kube_system_pods_running(): ''' Check pod status in the kube-system namespace. 
Returns True if all @@ -871,7 +982,6 @@ def all_kube_system_pods_running(): return False result = json.loads(output) - for pod in result['items']: status = pod['status']['phase'] if status != 'Running': diff --git a/cluster/juju/layers/kubernetes-worker/layer.yaml b/cluster/juju/layers/kubernetes-worker/layer.yaml index 73678fa8b87..5cfc04020de 100644 --- a/cluster/juju/layers/kubernetes-worker/layer.yaml +++ b/cluster/juju/layers/kubernetes-worker/layer.yaml @@ -22,6 +22,7 @@ options: - 'ceph-common' - 'nfs-common' - 'socat' + - 'virt-what' tls-client: ca_certificate_path: '/root/cdk/ca.crt' server_certificate_path: '/root/cdk/server.crt' diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py index b9ffdb32ea0..401e3a70480 100644 --- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py +++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py @@ -149,6 +149,7 @@ def install_snaps(): hookenv.status_set('maintenance', 'Installing kube-proxy snap') snap.install('kube-proxy', channel=channel, classic=True) set_state('kubernetes-worker.snaps.installed') + set_state('kubernetes-worker.restart-needed') remove_state('kubernetes-worker.snaps.upgrade-needed') remove_state('kubernetes-worker.snaps.upgrade-specified') @@ -157,15 +158,15 @@ def install_snaps(): def shutdown(): ''' When this unit is destroyed: - delete the current node - - stop the kubelet service - - stop the kube-proxy service - - remove the 'kubernetes-worker.cni-plugins.installed' state + - stop the worker services ''' - if os.path.isfile(kubeconfig_path): - kubectl('delete', 'node', gethostname()) - service_stop('kubelet') - service_stop('kube-proxy') - remove_state('kubernetes-worker.cni-plugins.installed') + try: + if os.path.isfile(kubeconfig_path): + kubectl('delete', 'node', gethostname()) + except CalledProcessError: + hookenv.log('Failed to unregister node.') + 
service_stop('snap.kubelet.daemon') + service_stop('snap.kube-proxy.daemon') @when('docker.available') @@ -303,9 +304,10 @@ def watch_for_changes(kube_api, kube_control, cni): @when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available', 'tls_client.ca.saved', 'tls_client.client.certificate.saved', 'tls_client.client.key.saved', 'tls_client.server.certificate.saved', - 'tls_client.server.key.saved', 'kube-control.dns.available', + 'tls_client.server.key.saved', + 'kube-control.dns.available', 'kube-control.auth.available', 'cni.available', 'kubernetes-worker.restart-needed') -def start_worker(kube_api, kube_control, cni): +def start_worker(kube_api, kube_control, auth_control, cni): ''' Start kubelet using the provided API and DNS info.''' servers = get_kube_api_servers(kube_api) # Note that the DNS server doesn't necessarily exist at this point. We know @@ -320,10 +322,13 @@ def start_worker(kube_api, kube_control, cni): hookenv.log('Waiting for cluster cidr.') return + creds = kube_control.get_auth_credentials() + data_changed('kube-control.creds', creds) + # set --allow-privileged flag for kubelet set_privileged() - create_config(random.choice(servers)) + create_config(random.choice(servers), creds) configure_worker_services(servers, dns, cluster_cidr) set_state('kubernetes-worker.config.created') restart_unit_services() @@ -429,27 +434,25 @@ def arch(): return architecture -def create_config(server): +def create_config(server, creds): '''Create a kubernetes configuration for the worker unit.''' # Get the options from the tls-client layer. layer_options = layer.options('tls-client') # Get all the paths to the tls information required for kubeconfig. ca = layer_options.get('ca_certificate_path') - key = layer_options.get('client_key_path') - cert = layer_options.get('client_certificate_path') # Create kubernetes configuration in the default location for ubuntu. 
- create_kubeconfig('/home/ubuntu/.kube/config', server, ca, key, cert, - user='ubuntu') + create_kubeconfig('/home/ubuntu/.kube/config', server, ca, + token=creds['client_token'], user='ubuntu') # Make the config dir readable by the ubuntu users so juju scp works. cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube'] check_call(cmd) # Create kubernetes configuration in the default location for root. - create_kubeconfig('/root/.kube/config', server, ca, key, cert, - user='root') + create_kubeconfig('/root/.kube/config', server, ca, + token=creds['client_token'], user='root') # Create kubernetes configuration for kubelet, and kube-proxy services. - create_kubeconfig(kubeconfig_path, server, ca, key, cert, - user='kubelet') + create_kubeconfig(kubeconfig_path, server, ca, + token=creds['kubelet_token'], user='kubelet') def configure_worker_services(api_servers, dns, cluster_cidr): @@ -464,7 +467,6 @@ def configure_worker_services(api_servers, dns, cluster_cidr): kubelet_opts.add('require-kubeconfig', 'true') kubelet_opts.add('kubeconfig', kubeconfig_path) kubelet_opts.add('network-plugin', 'cni') - kubelet_opts.add('logtostderr', 'true') kubelet_opts.add('v', '0') kubelet_opts.add('address', '0.0.0.0') kubelet_opts.add('port', '10250') @@ -474,6 +476,7 @@ def configure_worker_services(api_servers, dns, cluster_cidr): kubelet_opts.add('client-ca-file', ca_cert_path) kubelet_opts.add('tls-cert-file', server_cert_path) kubelet_opts.add('tls-private-key-file', server_key_path) + kubelet_opts.add('logtostderr', 'true') kube_proxy_opts = FlagManager('kube-proxy') kube_proxy_opts.add('cluster-cidr', cluster_cidr) @@ -482,25 +485,49 @@ def configure_worker_services(api_servers, dns, cluster_cidr): kube_proxy_opts.add('v', '0') kube_proxy_opts.add('master', random.choice(api_servers), strict=True) + if b'lxc' in check_output('virt-what', shell=True): + kube_proxy_opts.add('conntrack-max-per-core', '0') + cmd = ['snap', 'set', 'kubelet'] + kubelet_opts.to_s().split(' ') 
check_call(cmd) cmd = ['snap', 'set', 'kube-proxy'] + kube_proxy_opts.to_s().split(' ') check_call(cmd) -def create_kubeconfig(kubeconfig, server, ca, key, certificate, user='ubuntu', - context='juju-context', cluster='juju-cluster'): +def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None, + user='ubuntu', context='juju-context', + cluster='juju-cluster', password=None, token=None): '''Create a configuration for Kubernetes based on path using the supplied arguments for values of the Kubernetes server, CA, key, certificate, user context and cluster.''' + if not key and not certificate and not password and not token: + raise ValueError('Missing authentication mechanism.') + + # token and password are mutually exclusive. Error early if both are + # present. The developer has requested an impossible situation. + # see: kubectl config set-credentials --help + if token and password: + raise ValueError('Token and Password are mutually exclusive.') # Create the config file with the address of the master server. cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \ '--server={2} --certificate-authority={3} --embed-certs=true' check_call(split(cmd.format(kubeconfig, cluster, server, ca))) + # Delete old users + cmd = 'kubectl config --kubeconfig={0} unset users' + check_call(split(cmd.format(kubeconfig))) # Create the credentials using the client flags. - cmd = 'kubectl config --kubeconfig={0} set-credentials {1} ' \ - '--client-key={2} --client-certificate={3} --embed-certs=true' - check_call(split(cmd.format(kubeconfig, user, key, certificate))) + cmd = 'kubectl config --kubeconfig={0} ' \ + 'set-credentials {1} '.format(kubeconfig, user) + + if key and certificate: + cmd = '{0} --client-key={1} --client-certificate={2} '\ + '--embed-certs=true'.format(cmd, key, certificate) + if password: + cmd = "{0} --username={1} --password={2}".format(cmd, user, password) + # This is mutually exclusive from password. They will not work together. 
+ if token: + cmd = "{0} --token={1}".format(cmd, token) + check_call(split(cmd)) # Create a default context with the cluster. cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \ '--cluster={2} --user={3}' @@ -762,6 +789,26 @@ def notify_master_gpu_not_enabled(kube_control): kube_control.set_gpu(False) +@when('kube-control.connected') +def request_kubelet_and_proxy_credentials(kube_control): + """ Request kubelet node authorization with a well formed kubelet user. + This also implies that we are requesting kube-proxy auth. """ + + # The kube-cotrol interface is created to support RBAC. + # At this point we might as well do the right thing and return the hostname + # even if it will only be used when we enable RBAC + nodeuser = 'system:node:{}'.format(gethostname()) + kube_control.set_auth_request(nodeuser) + + +@when('kube-control.auth.available') +def catch_change_in_creds(kube_control): + """Request a service restart in case credential updates were detected.""" + creds = kube_control.get_auth_credentials() + if data_changed('kube-control.creds', creds): + set_state('kubernetes-worker.restart-needed') + + @when_not('kube-control.connected') def missing_kube_control(): """Inform the operator they need to add the kube-control relation. 
diff --git a/cluster/kubemark/gce/config-default.sh b/cluster/kubemark/gce/config-default.sh index dd9df48f807..616f8ad4775 100644 --- a/cluster/kubemark/gce/config-default.sh +++ b/cluster/kubemark/gce/config-default.sh @@ -35,7 +35,7 @@ REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false} PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false} MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-gci} -NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-debian} +NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-gci} MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-cos-stable-59-9460-64-0} MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud} diff --git a/cluster/kubemark/pre-existing/config-default.sh b/cluster/kubemark/pre-existing/config-default.sh new file mode 100644 index 00000000000..d6e4ab51871 --- /dev/null +++ b/cluster/kubemark/pre-existing/config-default.sh @@ -0,0 +1,39 @@ +#!/bin/bash +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Configuration for landing a Kubemark cluster on a pre-existing Kubernetes +# cluster. + +# Pre-existing provider expects a MASTER_IP. +# If you need to specify a port that's not the default (443), add it to MASTER_IP. 
+# +# Example: Connect to the Master on the secure port 6443 +# MASTER_IP=192.168.122.5:6443 +# +MASTER_IP="${MASTER_IP:-}" + +# The container registry and project given to the kubemark container: +# $CONTAINER_REGISTRY/$PROJECT/kubemark +# +CONTAINER_REGISTRY="${CONTAINER_REGISTRY:-}" +PROJECT="${PROJECT:-}" + +NUM_NODES="${NUM_NODES:-1}" + +TEST_CLUSTER_API_CONTENT_TYPE="${TEST_CLUSTER_API_CONTENT_TYPE:-}" +KUBELET_TEST_LOG_LEVEL="${KUBELET_TEST_LOG_LEVEL:-}" +KUBEPROXY_TEST_LOG_LEVEL="${KUBEPROXY_TEST_LOG_LEVEL:-}" +MASTER_NAME="${MASTER_NAME:-}" +USE_REAL_PROXIER="${USE_REAL_PROXIER:-true}" diff --git a/cluster/log-dump.sh b/cluster/log-dump.sh index 941df1eb5b1..8855bf54805 100755 --- a/cluster/log-dump.sh +++ b/cluster/log-dump.sh @@ -224,8 +224,19 @@ function dump_nodes() { return fi + nodes_selected_for_logs=() + if [[ -n "${LOGDUMP_ONLY_N_RANDOM_NODES:-}" ]]; then + # We randomly choose 'LOGDUMP_ONLY_N_RANDOM_NODES' many nodes for fetching logs. + for index in `shuf -i 0-$(( ${#node_names[*]} - 1 )) -n ${LOGDUMP_ONLY_N_RANDOM_NODES}` + do + nodes_selected_for_logs+=("${node_names[$index]}") + done + else + nodes_selected_for_logs=( "${node_names[@]}" ) + fi + proc=${max_scp_processes} - for node_name in "${node_names[@]}"; do + for node_name in "${nodes_selected_for_logs[@]}"; do node_dir="${report_dir}/${node_name}" mkdir -p "${node_dir}" # Save logs in the background. This speeds up things when there are diff --git a/cluster/log-dump/log-dump.sh b/cluster/log-dump/log-dump.sh new file mode 100755 index 00000000000..0bd10a141cf --- /dev/null +++ b/cluster/log-dump/log-dump.sh @@ -0,0 +1,363 @@ +#!/bin/bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Call this to dump all master and node logs into the folder specified in $1 +# (defaults to _artifacts). Only works if the provider supports SSH. + +set -o errexit +set -o nounset +set -o pipefail + +readonly report_dir="${1:-_artifacts}" + +# In order to more trivially extend log-dump for custom deployments, +# check for a function named log_dump_custom_get_instances. If it's +# defined, we assume the function can me called with one argument, the +# role, which is either "master" or "node". +if [[ $(type -t log_dump_custom_get_instances) == "function" ]]; then + readonly use_custom_instance_list=yes +else + readonly use_custom_instance_list= +fi + +readonly master_ssh_supported_providers="gce aws kubemark" +readonly node_ssh_supported_providers="gce gke aws kubemark" + +readonly master_logfiles="kube-apiserver kube-scheduler rescheduler kube-controller-manager etcd etcd-events glbc cluster-autoscaler kube-addon-manager fluentd" +readonly node_logfiles="kube-proxy fluentd node-problem-detector" +readonly node_systemd_services="node-problem-detector" +readonly hollow_node_logfiles="kubelet-hollow-node-* kubeproxy-hollow-node-* npd-*" +readonly aws_logfiles="cloud-init-output" +readonly gce_logfiles="startupscript" +readonly kern_logfile="kern" +readonly initd_logfiles="docker" +readonly supervisord_logfiles="kubelet supervisor/supervisord supervisor/kubelet-stdout supervisor/kubelet-stderr supervisor/docker-stdout supervisor/docker-stderr" +readonly systemd_services="kubelet docker" + +# Limit the number of concurrent node connections so that we don't 
run out of +# file descriptors for large clusters. +readonly max_scp_processes=25 + +# This template spits out the external IPs and images for each node in the cluster in a format like so: +# 52.32.7.85 gcr.io/google_containers/kube-apiserver:1355c18c32d7bef16125120bce194fad gcr.io/google_containers/kube-controller-manager:46365cdd8d28b8207950c3c21d1f3900 [...] +readonly ips_and_images='{range .items[*]}{@.status.addresses[?(@.type == "ExternalIP")].address} {@.status.images[*].names[*]}{"\n"}{end}' + +function setup() { + if [[ -z "${use_custom_instance_list}" ]]; then + KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. + : ${KUBE_CONFIG_FILE:="config-test.sh"} + source "${KUBE_ROOT}/cluster/kube-util.sh" + detect-project &> /dev/null + elif [[ -z "${LOG_DUMP_SSH_KEY:-}" ]]; then + echo "LOG_DUMP_SSH_KEY not set, but required when using log_dump_custom_get_instances" + exit 1 + elif [[ -z "${LOG_DUMP_SSH_USER:-}" ]]; then + echo "LOG_DUMP_SSH_USER not set, but required when using log_dump_custom_get_instances" + exit 1 + fi +} + +function log-dump-ssh() { + if [[ -z "${use_custom_instance_list}" ]]; then + ssh-to-node "$@" + return + fi + + local host="$1" + local cmd="$2" + + ssh -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${LOG_DUMP_SSH_KEY}" "${LOG_DUMP_SSH_USER}@${host}" "${cmd}" +} + +# Copy all files /var/log/{$3}.log on node $1 into local dir $2. +# $3 should be a space-separated string of files. +# This function shouldn't ever trigger errexit, but doesn't block stderr. +function copy-logs-from-node() { + local -r node="${1}" + local -r dir="${2}" + local files=( ${3} ) + # Append ".log*" + # The * at the end is needed to also copy rotated logs (which happens + # in large clusters and long runs). + files=( "${files[@]/%/.log*}" ) + # Prepend "/var/log/" + files=( "${files[@]/#/\/var\/log\/}" ) + # Comma delimit (even the singleton, or scp does the wrong thing), surround by braces. 
+ local -r scp_files="{$(printf "%s," "${files[@]}")}" + + if [[ -n "${use_custom_instance_list}" ]]; then + scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${LOG_DUMP_SSH_KEY}" "${LOG_DUMP_SSH_USER}@${node}:${scp_files}" "${dir}" > /dev/null || true + else + case "${KUBERNETES_PROVIDER}" in + gce|gke|kubemark) + # get-serial-port-output lets you ask for ports 1-4, but currently (11/21/2016) only port 1 contains useful information + gcloud compute instances get-serial-port-output --project "${PROJECT}" --zone "${ZONE}" --port 1 "${node}" > "${dir}/serial-1.log" || true + gcloud compute scp --recurse --project "${PROJECT}" --zone "${ZONE}" "${node}:${scp_files}" "${dir}" > /dev/null || true + ;; + aws) + local ip=$(get_ssh_hostname "${node}") + scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "${SSH_USER}@${ip}:${scp_files}" "${dir}" > /dev/null || true + ;; + esac + fi +} + +# Save logs for node $1 into directory $2. Pass in any non-common files in $3. +# Pass in any non-common systemd services in $4. +# $3 and $4 should be a space-separated list of files. 
+# This function shouldn't ever trigger errexit +function save-logs() { + local -r node_name="${1}" + local -r dir="${2}" + local files="${3}" + local opt_systemd_services="${4:-""}" + if [[ -n "${use_custom_instance_list}" ]]; then + if [[ -n "${LOG_DUMP_SAVE_LOGS:-}" ]]; then + files="${files} ${LOG_DUMP_SAVE_LOGS:-}" + fi + else + case "${KUBERNETES_PROVIDER}" in + gce|gke|kubemark) + files="${files} ${gce_logfiles}" + if [[ "${KUBERNETES_PROVIDER}" == "kubemark" && "${ENABLE_HOLLOW_NODE_LOGS:-}" == "true" ]]; then + files="${files} ${hollow_node_logfiles}" + fi + ;; + aws) + files="${files} ${aws_logfiles}" + ;; + esac + fi + local -r services=( ${systemd_services} ${opt_systemd_services} ${LOG_DUMP_SAVE_SERVICES:-} ) + + if log-dump-ssh "${node_name}" "command -v journalctl" &> /dev/null; then + log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-node-installation.service" > "${dir}/kube-node-installation.log" || true + log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-node-configuration.service" > "${dir}/kube-node-configuration.log" || true + log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -k" > "${dir}/kern.log" || true + + for svc in "${services[@]}"; do + log-dump-ssh "${node_name}" "sudo journalctl --output=cat -u ${svc}.service" > "${dir}/${svc}.log" || true + done + else + files="${kern_logfile} ${files} ${initd_logfiles} ${supervisord_logfiles}" + fi + + echo "Changing logfiles to be world-readable for download" + log-dump-ssh "${node_name}" "sudo chmod -R a+r /var/log" || true + + echo "Copying '${files}' from ${node_name}" + copy-logs-from-node "${node_name}" "${dir}" "${files}" +} + +function dump_masters() { + local master_names + if [[ -n "${use_custom_instance_list}" ]]; then + master_names=( $(log_dump_custom_get_instances master) ) + elif [[ ! 
"${master_ssh_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then + echo "Master SSH not supported for ${KUBERNETES_PROVIDER}" + return + else + if ! (detect-master &> /dev/null); then + echo "Master not detected. Is the cluster up?" + return + fi + master_names=( "${MASTER_NAME}" ) + fi + + if [[ "${#master_names[@]}" == 0 ]]; then + echo "No masters found?" + return + fi + + proc=${max_scp_processes} + for master_name in "${master_names[@]}"; do + master_dir="${report_dir}/${master_name}" + mkdir -p "${master_dir}" + save-logs "${master_name}" "${master_dir}" "${master_logfiles}" & + + # We don't want to run more than ${max_scp_processes} at a time, so + # wait once we hit that many nodes. This isn't ideal, since one might + # take much longer than the others, but it should help. + proc=$((proc - 1)) + if [[ proc -eq 0 ]]; then + proc=${max_scp_processes} + wait + fi + done + # Wait for any remaining processes. + if [[ proc -gt 0 && proc -lt ${max_scp_processes} ]]; then + wait + fi +} + +function dump_nodes() { + local node_names + if [[ -n "$1" ]]; then + echo "Dumping logs for nodes provided as args to dump_nodes() function" + node_names=( "$@" ) + elif [[ -n "${use_custom_instance_list}" ]]; then + echo "Dumping logs for nodes provided by log_dump_custom_get_instances() function" + node_names=( $(log_dump_custom_get_instances node) ) + elif [[ ! "${node_ssh_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then + echo "Node SSH not supported for ${KUBERNETES_PROVIDER}" + return + else + echo "Detecting nodes in the cluster" + detect-node-names &> /dev/null + node_names=( "${NODE_NAMES[@]}" ) + fi + + if [[ "${#node_names[@]}" == 0 ]]; then + echo "No nodes found!" + return + fi + + nodes_selected_for_logs=() + if [[ -n "${LOGDUMP_ONLY_N_RANDOM_NODES:-}" ]]; then + # We randomly choose 'LOGDUMP_ONLY_N_RANDOM_NODES' many nodes for fetching logs. 
+ for index in `shuf -i 0-$(( ${#node_names[*]} - 1 )) -n ${LOGDUMP_ONLY_N_RANDOM_NODES}` + do + nodes_selected_for_logs+=("${node_names[$index]}") + done + else + nodes_selected_for_logs=( "${node_names[@]}" ) + fi + + proc=${max_scp_processes} + for node_name in "${nodes_selected_for_logs[@]}"; do + node_dir="${report_dir}/${node_name}" + mkdir -p "${node_dir}" + # Save logs in the background. This speeds up things when there are + # many nodes. + save-logs "${node_name}" "${node_dir}" "${node_logfiles}" "${node_systemd_services}" & + + # We don't want to run more than ${max_scp_processes} at a time, so + # wait once we hit that many nodes. This isn't ideal, since one might + # take much longer than the others, but it should help. + proc=$((proc - 1)) + if [[ proc -eq 0 ]]; then + proc=${max_scp_processes} + wait + fi + done + # Wait for any remaining processes. + if [[ proc -gt 0 && proc -lt ${max_scp_processes} ]]; then + wait + fi +} + +function dump_nodes_with_logexporter() { + echo "Detecting nodes in the cluster" + detect-node-names &> /dev/null + + if [[ "${#NODE_NAMES[@]}" == 0 ]]; then + echo "No nodes found!" + return + fi + + # Obtain parameters required by logexporter. + local -r service_account_credentials="$(cat ${GOOGLE_APPLICATION_CREDENTIALS} | base64)" + local -r cloud_provider="${KUBERNETES_PROVIDER}" + local -r gcs_artifacts_dir="${GCS_ARTIFACTS_DIR}" + local -r enable_hollow_node_logs="${ENABLE_HOLLOW_NODE_LOGS:-false}" + local -r logexport_timeout_seconds="$(( 30 + NUM_NODES / 10 ))" + + # Fill in the parameters in the logexporter daemonset template. 
+ sed -i'' -e "s/{{.ServiceAccountCredentials}}/${service_account_credentials}/g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml" + sed -i'' -e "s/{{.CloudProvider}}/${cloud_provider}/g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml" + sed -i'' -e "s/{{.GCSPath}}/${gcs_artifacts_dir}/g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml" + sed -i'' -e "s/{{.EnableHollowNodeLogs}}/${enable_hollow_node_logs}/g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml" + + # Create the logexporter namespace, service-account secret and the logexporter daemonset within that namespace. + KUBECTL="${KUBECTL:-${KUBE_ROOT}/cluster/kubectl.sh}" + "${KUBECTL}" create -f "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml" + + # Give some time for the pods to finish uploading logs. + sleep "${logexport_sleep_seconds}" + + # List the logexporter pods created and their corresponding nodes. + pods_and_nodes=() + for retry in {1..5}; do + pods_and_nodes=$(${KUBECTL} get pods -n logexporter -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName | tail -n +2) + if [[ -n "${pods_and_nodes}" ]]; then + echo -e "List of logexporter pods found:\n${pods_and_nodes}" + break + fi + if [[ "${retry}" == 5 ]]; then + echo "Failed to list any logexporter pods after multiple retries.. falling back to logdump for nodes through SSH" + "${KUBECTL}" delete namespace logexporter + dump_nodes "${NODE_NAMES[@]}" + return + fi + done + + # Collect names of nodes we didn't find a logexporter pod on. + # Note: This step is O(#nodes^2) as we check if each node is present in the list of nodes running logexporter. + # Making it linear would add code complexity without much benefit (as it just takes < 1s for 5k nodes anyway). + failed_nodes=() + for node in "${NODE_NAMES[@]}"; do + if [[ ! "${pods_and_nodes}" =~ "${node}" ]]; then + failed_nodes+=("${node}") + fi + done + + # Collect names of nodes whose logexporter pod didn't succeed. 
+ # TODO(shyamjvs): Parallelize the for loop below to make it faster (if needed). + logexporter_pods=( $(echo "${pods_and_nodes}" | awk '{print $1}') ) + logexporter_nodes=( $(echo "${pods_and_nodes}" | awk '{print $2}') ) + for index in "${!logexporter_pods[@]}"; do + pod="${logexporter_pods[$index]}" + node="${logexporter_nodes[$index]}" + # TODO(shyamjvs): Use a /status endpoint on the pod instead of checking its logs if that's faster. + pod_success_log=$(${KUBECTL} get logs ${pod} -n logexporter 2>&1 | grep "Logs successfully uploaded") || true + if [[ -z "${pod_success_log}" ]]; then + failed_nodes+=("${node}") + fi + done + + # Delete the logexporter resources and dump logs for the failed nodes (if any) through SSH. + "${KUBECTL}" delete namespace logexporter + if [[ "${#failed_nodes[@]}" != 0 ]]; then + echo -e "Dumping logs through SSH for nodes logexporter failed to succeed on:\n${failed_nodes[@]}" + dump_nodes "${failed_nodes[@]}" + fi +} + +function main() { + setup + # Copy master logs to artifacts dir locally (through SSH). + echo "Dumping logs from master locally to '${report_dir}'" + dump_masters + if [[ "${DUMP_ONLY_MASTER_LOGS:-}" == "true" ]]; then + echo "Skipping dumping of node logs" + return + fi + + # Copy logs from nodes to GCS directly or to artifacts dir locally (through SSH). + if [[ "${ENABLE_LOGEXPORTER:-}" == "true" ]]; then + if [[ -z "${GCS_ARTIFACTS_DIR:-}" ]]; then + echo "Env var GCS_ARTIFACTS_DIR is empty. Failed to dump node logs to GCS." 
+ exit 1 + fi + echo "Dumping logs from nodes to GCS directly at '${GCS_ARTIFACTS_DIR}'" + dump_nodes_with_logexporter + else + echo "Dumping logs from nodes locally to '${report_dir}'" + dump_nodes + fi +} + +main diff --git a/cluster/log-dump/logexporter-daemonset.yaml b/cluster/log-dump/logexporter-daemonset.yaml new file mode 100644 index 00000000000..8c16dc35bcf --- /dev/null +++ b/cluster/log-dump/logexporter-daemonset.yaml @@ -0,0 +1,74 @@ +# Template job config for running the log exporter on the cluster as a daemonset. +# Creates everything within 'logexporter' namespace. +# +# Note: Since daemonsets have "AlwaysRestart" policy for pods, we provide a long +# sleep-duration (24 hr) to the logexporter pods so they don't finish the work and +# get restarted while some pods are still running. So it is your duty to detect +# the work has been done (or use some timeout) and delete the daemonset yourself. + +apiVersion: v1 +kind: Namespace +metadata: + name: logexporter +--- +apiVersion: v1 +kind: Secret +metadata: + name: google-service-account + namespace: logexporter +type: Opaque +data: + service-account.json: {{.ServiceAccountCredentials}} +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: logexporter + namespace: logexporter +spec: + template: + metadata: + labels: + app: logexporter + spec: + containers: + - name: logexporter-test + image: gcr.io/google-containers/logexporter:v0.1.0 + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + command: + - logexporter + - --node-name=$(NODE_NAME) + - --cloud-provider={{.CloudProvider}} + - --gcs-path={{.GCSPath}} + - --gcloud-auth-file-path=/etc/service-account/service-account.json + - --enable-hollow-node-logs={{.EnableHollowNodeLogs}} + - --sleep-duration=24h + - --alsologtostderr + volumeMounts: + - mountPath: /etc/service-account + name: service + readOnly: true + - mountPath: /var/log + name: varlog + readOnly: true + - mountPath: /workspace/etc + name: hostetc 
+ readOnly: true + resources: + requests: + cpu: 10m + memory: 10Mi + volumes: + - name: service + secret: + secretName: google-service-account + - name: varlog + hostPath: + path: /var/log + - name: hostetc + hostPath: + path: /etc diff --git a/cluster/openstack-heat/config-default.sh b/cluster/openstack-heat/config-default.sh index 66702a3e9f3..bc6fb3aa726 100644 --- a/cluster/openstack-heat/config-default.sh +++ b/cluster/openstack-heat/config-default.sh @@ -43,6 +43,9 @@ CLUSTER_IP_RANGE=${CLUSTER_IP_RANGE:-10.244.0.0/16} SWIFT_SERVER_URL=${SWIFT_SERVER_URL:-} +# The name of the object store container to use +SWIFT_OBJECT_STORE=${SWIFT_OBJECT_STORE:-kubernetes} + # Flag indicates if new image must be created. If 'false' then image with IMAGE_ID will be used. # If 'true' then new image will be created from file config-image.sh CREATE_IMAGE=${CREATE_IMAGE:-true} # use "true" for devstack diff --git a/cluster/openstack-heat/util.sh b/cluster/openstack-heat/util.sh index 66726db3e23..c81b16cc201 100644 --- a/cluster/openstack-heat/util.sh +++ b/cluster/openstack-heat/util.sh @@ -108,7 +108,7 @@ function create-stack() { # ROOT # KUBERNETES_RELEASE_TAR function upload-resources() { - swift post kubernetes --read-acl '.r:*,.rlistings' + swift post ${SWIFT_OBJECT_STORE} --read-acl '.r:*,.rlistings' locations=( "${ROOT}/../../_output/release-tars/${KUBERNETES_RELEASE_TAR}" @@ -119,11 +119,11 @@ function upload-resources() { RELEASE_TAR_PATH=$(dirname ${RELEASE_TAR_LOCATION}) echo "[INFO] Uploading ${KUBERNETES_RELEASE_TAR}" - swift upload kubernetes ${RELEASE_TAR_PATH}/${KUBERNETES_RELEASE_TAR} \ + swift upload ${SWIFT_OBJECT_STORE} ${RELEASE_TAR_PATH}/${KUBERNETES_RELEASE_TAR} \ --object-name kubernetes-server.tar.gz echo "[INFO] Uploading kubernetes-salt.tar.gz" - swift upload kubernetes ${RELEASE_TAR_PATH}/kubernetes-salt.tar.gz \ + swift upload ${SWIFT_OBJECT_STORE} ${RELEASE_TAR_PATH}/kubernetes-salt.tar.gz \ --object-name kubernetes-salt.tar.gz } @@ -196,7 +196,7 
@@ function run-heat-script() { fi SWIFT_SERVER_URL=$(openstack catalog show object-store --format value | egrep -o "$rgx" | cut -d" " -f2 | head -n 1) fi - local swift_repo_url="${SWIFT_SERVER_URL}/kubernetes" + local swift_repo_url="${SWIFT_SERVER_URL}/${SWIFT_OBJECT_STORE}" if [ $CREATE_IMAGE = true ]; then echo "[INFO] Retrieve new image ID" diff --git a/cluster/pre-existing/util.sh b/cluster/pre-existing/util.sh new file mode 100644 index 00000000000..8443653c5a2 --- /dev/null +++ b/cluster/pre-existing/util.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A library of helper functions for landing kubemark containers on a +# pre-existing Kubernetes master. See test/kubemark/pre-existing/README.md +# for me details on using a pre-existing provider. + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. + +source "${KUBE_ROOT}/cluster/common.sh" +source "${KUBE_ROOT}/hack/lib/util.sh" + +function detect-project() { + if [[ -z "${MASTER_IP:-}" ]]; then + echo "Set 'MASTER_IP' to the instance assigned to be the Kubernetes master" 1>&2 + exit 1 + fi + + if [[ -z "${PROJECT:-}" ]]; then + echo "Set 'PROJECT' to the name of the container project: $CONTAINER_REGISTRY/$PROJECT/kubemark" >&2 + exit 1 + fi + + if [[ -z "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then + cluster_range=$(echo "${MASTER_IP}" | awk -F '.' 
'{printf("%d.%d.%d.0", $1, $2, $3)}') + SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-$cluster_range/16}" + fi +} + +function create-certs { + rm /tmp/kubeconfig + + execute-cmd-on-pre-existing-master-with-retries "sudo cat /etc/kubernetes/admin.conf" > /tmp/kubeconfig + CA_CERT_BASE64=$(cat /tmp/kubeconfig | grep certificate-authority | awk '{print $2}' | head -n 1) + KUBELET_CERT_BASE64=$(cat /tmp/kubeconfig | grep client-certificate-data | awk '{print $2}' | head -n 1) + KUBELET_KEY_BASE64=$(cat /tmp/kubeconfig | grep client-key-data | awk '{print $2}' | head -n 1) + + # Local kubeconfig.kubemark vars + KUBECFG_CERT_BASE64="${KUBELET_CERT_BASE64}" + KUBECFG_KEY_BASE64="${KUBELET_KEY_BASE64}" + + # The pre-existing Kubernetes master already has these setup + # Set these vars but don't use them + CA_KEY_BASE64=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + MASTER_CERT_BASE64=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + MASTER_KEY_BASE64=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + KUBEAPISERVER_CERT_BASE64=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + KUBEAPISERVER_KEY_BASE64=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) +} diff --git a/cluster/vagrant/OWNERS b/cluster/vagrant/OWNERS index 5415a641596..f90049369f8 100644 --- a/cluster/vagrant/OWNERS +++ b/cluster/vagrant/OWNERS @@ -35,4 +35,3 @@ reviewers: - k82cn - caseydavenport - johscheuer -- rjnagal diff --git a/cluster/validate-cluster.sh b/cluster/validate-cluster.sh index 3d10edd3892..da3cbcdc14e 100755 --- a/cluster/validate-cluster.sh +++ b/cluster/validate-cluster.sh @@ -57,7 +57,7 @@ if [[ "${KUBERNETES_PROVIDER:-}" == "gce" ]]; then # In multizone mode we need to add instances for all nodes in the region. 
if [[ "${MULTIZONE:-}" == "true" ]]; then EXPECTED_NUM_NODES=$(gcloud -q compute instances list --project="${PROJECT}" --format=[no-heading] --regexp="${NODE_INSTANCE_PREFIX}.*" \ - --zones=$(gcloud -q compute zones list --project="${PROJECT}" --filter=region=${REGION} --format=[no-heading]\(name\) | tr "\n" "," | sed "s/,$//") | wc -l) + --zones=$(gcloud -q compute zones list --project="${PROJECT}" --filter=region=${REGION} --format=csv[no-heading]\(name\) | tr "\n" "," | sed "s/,$//") | wc -l) echo "Computing number of nodes, NODE_INSTANCE_PREFIX=${NODE_INSTANCE_PREFIX}, REGION=${REGION}, EXPECTED_NUM_NODES=${EXPECTED_NUM_NODES}" fi fi diff --git a/cmd/cloud-controller-manager/app/BUILD b/cmd/cloud-controller-manager/app/BUILD index d0a20816d9f..31ccc737252 100644 --- a/cmd/cloud-controller-manager/app/BUILD +++ b/cmd/cloud-controller-manager/app/BUILD @@ -16,8 +16,6 @@ go_library( "//pkg/api:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/informers/informers_generated/externalversions:go_default_library", - "//pkg/client/leaderelection:go_default_library", - "//pkg/client/leaderelection/resourcelock:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/cloud:go_default_library", @@ -32,9 +30,12 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", + "//vendor/k8s.io/client-go/tools/leaderelection:go_default_library", + "//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library", 
"//vendor/k8s.io/client-go/tools/record:go_default_library", ], ) diff --git a/cmd/cloud-controller-manager/app/controllermanager.go b/cmd/cloud-controller-manager/app/controllermanager.go index 71045336f39..c4297c16855 100644 --- a/cmd/cloud-controller-manager/app/controllermanager.go +++ b/cmd/cloud-controller-manager/app/controllermanager.go @@ -30,16 +30,17 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/record" "k8s.io/kubernetes/cmd/cloud-controller-manager/app/options" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions" - "k8s.io/kubernetes/pkg/client/leaderelection" - "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller" nodecontroller "k8s.io/kubernetes/pkg/controller/cloud" @@ -102,7 +103,7 @@ func Run(s *options.CloudControllerManagerServer, cloud cloudprovider.Interface) if err != nil { glog.Fatalf("Invalid API configuration: %v", err) } - leaderElectionClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election")) + leaderElectionClient := kubernetes.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election")) // Start the external controller manager server go func() { @@ -253,9 +254,8 @@ func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restc // If apiserver is not running we should wait for some time and fail only then. This is particularly // important when we start apiserver and controller manager at the same time. 
- var versionStrings []string err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) { - if versionStrings, err = restclient.ServerAPIVersions(kubeconfig); err == nil { + if _, err = restclient.ServerAPIVersions(kubeconfig); err == nil { return true, nil } glog.Errorf("Failed to get api versions from server: %v", err) diff --git a/cmd/cloud-controller-manager/app/options/BUILD b/cmd/cloud-controller-manager/app/options/BUILD index aa17941dbe9..94a6333d054 100644 --- a/cmd/cloud-controller-manager/app/options/BUILD +++ b/cmd/cloud-controller-manager/app/options/BUILD @@ -13,7 +13,7 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/apis/componentconfig:go_default_library", - "//pkg/client/leaderelection:go_default_library", + "//pkg/client/leaderelectionconfig:go_default_library", "//pkg/features:go_default_library", "//pkg/master/ports:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", diff --git a/cmd/cloud-controller-manager/app/options/options.go b/cmd/cloud-controller-manager/app/options/options.go index 74a51749680..0f5a3e79689 100644 --- a/cmd/cloud-controller-manager/app/options/options.go +++ b/cmd/cloud-controller-manager/app/options/options.go @@ -22,7 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/apis/componentconfig" - "k8s.io/kubernetes/pkg/client/leaderelection" + "k8s.io/kubernetes/pkg/client/leaderelectionconfig" "k8s.io/kubernetes/pkg/master/ports" // add the kubernetes feature gates @@ -56,7 +56,7 @@ func NewCloudControllerManagerServer() *CloudControllerManagerServer { ContentType: "application/vnd.kubernetes.protobuf", KubeAPIQPS: 20.0, KubeAPIBurst: 30, - LeaderElection: leaderelection.DefaultLeaderElectionConfiguration(), + LeaderElection: leaderelectionconfig.DefaultLeaderElectionConfiguration(), ControllerStartInterval: metav1.Duration{Duration: 0 * time.Second}, }, NodeStatusUpdateFrequency: 
metav1.Duration{Duration: 5 * time.Minute}, @@ -90,7 +90,7 @@ func (s *CloudControllerManagerServer) AddFlags(fs *pflag.FlagSet) { fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver") fs.DurationVar(&s.ControllerStartInterval.Duration, "controller-start-interval", s.ControllerStartInterval.Duration, "Interval between starting controller managers.") - leaderelection.BindFlags(&s.LeaderElection, fs) + leaderelectionconfig.BindFlags(&s.LeaderElection, fs) utilfeature.DefaultFeatureGate.AddFlag(fs) } diff --git a/cmd/genkubedocs/BUILD b/cmd/genkubedocs/BUILD index a947a1283d9..61f0c4f5133 100644 --- a/cmd/genkubedocs/BUILD +++ b/cmd/genkubedocs/BUILD @@ -19,6 +19,7 @@ go_library( srcs = ["gen_kube_docs.go"], tags = ["automanaged"], deps = [ + "//cmd/cloud-controller-manager/app:go_default_library", "//cmd/genutils:go_default_library", "//cmd/kube-apiserver/app:go_default_library", "//cmd/kube-controller-manager/app:go_default_library", diff --git a/cmd/genkubedocs/gen_kube_docs.go b/cmd/genkubedocs/gen_kube_docs.go index 0cdd763c661..faebb60e83b 100644 --- a/cmd/genkubedocs/gen_kube_docs.go +++ b/cmd/genkubedocs/gen_kube_docs.go @@ -21,6 +21,7 @@ import ( "os" "github.com/spf13/cobra/doc" + ccmapp "k8s.io/kubernetes/cmd/cloud-controller-manager/app" "k8s.io/kubernetes/cmd/genutils" apiservapp "k8s.io/kubernetes/cmd/kube-apiserver/app" cmapp "k8s.io/kubernetes/cmd/kube-controller-manager/app" @@ -56,6 +57,10 @@ func main() { // generate docs for kube-controller-manager controllermanager := cmapp.NewControllerManagerCommand() doc.GenMarkdownTree(controllermanager, outDir) + case "cloud-controller-manager": + // generate docs for cloud-controller-manager + cloudcontrollermanager := ccmapp.NewCloudControllerManagerCommand() + doc.GenMarkdownTree(cloudcontrollermanager, outDir) case "kube-proxy": // generate docs for kube-proxy proxy := proxyapp.NewProxyCommand() diff --git a/cmd/genman/BUILD 
b/cmd/genman/BUILD index 06dd45cd035..b6e57067d74 100644 --- a/cmd/genman/BUILD +++ b/cmd/genman/BUILD @@ -19,6 +19,7 @@ go_library( srcs = ["gen_kube_man.go"], tags = ["automanaged"], deps = [ + "//cmd/cloud-controller-manager/app:go_default_library", "//cmd/genutils:go_default_library", "//cmd/kube-apiserver/app:go_default_library", "//cmd/kube-controller-manager/app:go_default_library", diff --git a/cmd/genman/gen_kube_man.go b/cmd/genman/gen_kube_man.go index 85361844ffb..f8974951252 100644 --- a/cmd/genman/gen_kube_man.go +++ b/cmd/genman/gen_kube_man.go @@ -26,6 +26,7 @@ import ( mangen "github.com/cpuguy83/go-md2man/md2man" "github.com/spf13/cobra" "github.com/spf13/pflag" + ccmapp "k8s.io/kubernetes/cmd/cloud-controller-manager/app" "k8s.io/kubernetes/cmd/genutils" apiservapp "k8s.io/kubernetes/cmd/kube-apiserver/app" cmapp "k8s.io/kubernetes/cmd/kube-controller-manager/app" @@ -73,6 +74,13 @@ func main() { for _, c := range controllermanager.Commands() { genMarkdown(c, "kube-controller-manager", outDir) } + case "cloud-controller-manager": + //generate manpage for cloud-controller-manager + controllermanager := ccmapp.NewCloudControllerManagerCommand() + genMarkdown(controllermanager, "", outDir) + for _, c := range controllermanager.Commands() { + genMarkdown(c, "cloud-controller-manager", outDir) + } case "kube-proxy": // generate manpage for kube-proxy proxy := proxyapp.NewProxyCommand() diff --git a/cmd/kube-apiserver/app/BUILD b/cmd/kube-apiserver/app/BUILD index 31b6b0f9b9a..661844133fa 100644 --- a/cmd/kube-apiserver/app/BUILD +++ b/cmd/kube-apiserver/app/BUILD @@ -39,7 +39,7 @@ go_library( "//pkg/kubeapiserver/options:go_default_library", "//pkg/kubeapiserver/server:go_default_library", "//pkg/master:go_default_library", - "//pkg/master/thirdparty:go_default_library", + "//pkg/master/controller/crdregistration:go_default_library", "//pkg/master/tunneler:go_default_library", "//pkg/quota/install:go_default_library", 
"//pkg/registry/cachesize:go_default_library", @@ -89,7 +89,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", "//vendor/k8s.io/apiserver/pkg/server:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/filters:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library", @@ -119,6 +118,7 @@ filegroup( ":package-srcs", "//cmd/kube-apiserver/app/options:all-srcs", "//cmd/kube-apiserver/app/preflight:all-srcs", + "//cmd/kube-apiserver/app/testing:all-srcs", ], tags = ["automanaged"], ) diff --git a/cmd/kube-apiserver/app/aggregator.go b/cmd/kube-apiserver/app/aggregator.go index e146d98c9b9..d4a884e75f0 100644 --- a/cmd/kube-apiserver/app/aggregator.go +++ b/cmd/kube-apiserver/app/aggregator.go @@ -41,8 +41,7 @@ import ( apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion" "k8s.io/kube-aggregator/pkg/controllers/autoregister" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" - informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" - "k8s.io/kubernetes/pkg/master/thirdparty" + "k8s.io/kubernetes/pkg/master/controller/crdregistration" ) func createAggregatorConfig(kubeAPIServerConfig genericapiserver.Config, commandOptions *options.ServerRunOptions, externalInformers kubeexternalinformers.SharedInformerFactory, serviceResolver aggregatorapiserver.ServiceResolver, proxyTransport *http.Transport) (*aggregatorapiserver.Config, error) { @@ -85,7 +84,7 @@ func createAggregatorConfig(kubeAPIServerConfig genericapiserver.Config, command return aggregatorConfig, nil } -func createAggregatorServer(aggregatorConfig *aggregatorapiserver.Config, delegateAPIServer 
genericapiserver.DelegationTarget, kubeInformers informers.SharedInformerFactory, apiExtensionInformers apiextensionsinformers.SharedInformerFactory) (*aggregatorapiserver.APIAggregator, error) { +func createAggregatorServer(aggregatorConfig *aggregatorapiserver.Config, delegateAPIServer genericapiserver.DelegationTarget, apiExtensionInformers apiextensionsinformers.SharedInformerFactory) (*aggregatorapiserver.APIAggregator, error) { aggregatorServer, err := aggregatorConfig.Complete().NewWithDelegate(delegateAPIServer) if err != nil { return nil, err @@ -98,14 +97,13 @@ func createAggregatorServer(aggregatorConfig *aggregatorapiserver.Config, delega } autoRegistrationController := autoregister.NewAutoRegisterController(aggregatorServer.APIRegistrationInformers.Apiregistration().InternalVersion().APIServices(), apiRegistrationClient) apiServices := apiServicesToRegister(delegateAPIServer, autoRegistrationController) - tprRegistrationController := thirdparty.NewAutoRegistrationController( - kubeInformers.Extensions().InternalVersion().ThirdPartyResources(), + crdRegistrationController := crdregistration.NewAutoRegistrationController( apiExtensionInformers.Apiextensions().InternalVersion().CustomResourceDefinitions(), autoRegistrationController) aggregatorServer.GenericAPIServer.AddPostStartHook("kube-apiserver-autoregistration", func(context genericapiserver.PostStartHookContext) error { go autoRegistrationController.Run(5, context.StopCh) - go tprRegistrationController.Run(5, context.StopCh) + go crdRegistrationController.Run(5, context.StopCh) return nil }) aggregatorServer.GenericAPIServer.AddHealthzChecks(healthz.NamedCheck("autoregister-completion", func(r *http.Request) error { diff --git a/cmd/kube-apiserver/app/options/options.go b/cmd/kube-apiserver/app/options/options.go index edc844d5bb6..c9daf9da337 100644 --- a/cmd/kube-apiserver/app/options/options.go +++ b/cmd/kube-apiserver/app/options/options.go @@ -143,7 +143,7 @@ func (s *ServerRunOptions) 
AddFlags(fs *pflag.FlagSet) { "Amount of time to retain events.") fs.BoolVar(&s.AllowPrivileged, "allow-privileged", s.AllowPrivileged, - "If true, allow privileged containers.") + "If true, allow privileged containers. [default=false]") fs.BoolVar(&s.EnableLogsHandler, "enable-logs-handler", s.EnableLogsHandler, "If true, install a /logs handler for the apiserver logs.") diff --git a/cmd/kube-apiserver/app/options/validation.go b/cmd/kube-apiserver/app/options/validation.go index 3a9bbbfea65..fb937bdf7db 100644 --- a/cmd/kube-apiserver/app/options/validation.go +++ b/cmd/kube-apiserver/app/options/validation.go @@ -63,6 +63,9 @@ func (options *ServerRunOptions) Validate() []error { if errs := options.Authentication.Validate(); len(errs) > 0 { errors = append(errors, errs...) } + if errs := options.Audit.Validate(); len(errs) > 0 { + errors = append(errors, errs...) + } if errs := options.InsecureServing.Validate("insecure-port"); len(errs) > 0 { errors = append(errors, errs...) } diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 9c101c706c5..ba6736f3137 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -47,7 +47,6 @@ import ( "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authorization/authorizer" - genericregistry "k8s.io/apiserver/pkg/registry/generic" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/filters" "k8s.io/apiserver/pkg/server/options/encryptionconfig" @@ -111,30 +110,39 @@ func Run(runOptions *options.ServerRunOptions, stopCh <-chan struct{}) error { // To help debugging, immediately log version glog.Infof("Version: %+v", version.Get()) - nodeTunneler, proxyTransport, err := CreateNodeDialer(runOptions) + server, err := CreateServerChain(runOptions, stopCh) if err != nil { return err } + return server.PrepareRun().Run(stopCh) +} + +// CreateServerChain creates the apiservers connected via 
delegation. +func CreateServerChain(runOptions *options.ServerRunOptions, stopCh <-chan struct{}) (*genericapiserver.GenericAPIServer, error) { + nodeTunneler, proxyTransport, err := CreateNodeDialer(runOptions) + if err != nil { + return nil, err + } kubeAPIServerConfig, sharedInformers, versionedInformers, insecureServingOptions, serviceResolver, err := CreateKubeAPIServerConfig(runOptions, nodeTunneler, proxyTransport) if err != nil { - return err + return nil, err } // TPRs are enabled and not yet beta, since this these are the successor, they fall under the same enablement rule // If additional API servers are added, they should be gated. apiExtensionsConfig, err := createAPIExtensionsConfig(*kubeAPIServerConfig.GenericConfig, runOptions) if err != nil { - return err + return nil, err } apiExtensionsServer, err := createAPIExtensionsServer(apiExtensionsConfig, genericapiserver.EmptyDelegate) if err != nil { - return err + return nil, err } - kubeAPIServer, err := CreateKubeAPIServer(kubeAPIServerConfig, apiExtensionsServer.GenericAPIServer, sharedInformers, apiExtensionsConfig.CRDRESTOptionsGetter) + kubeAPIServer, err := CreateKubeAPIServer(kubeAPIServerConfig, apiExtensionsServer.GenericAPIServer, sharedInformers) if err != nil { - return err + return nil, err } // if we're starting up a hacked up version of this API server for a weird test case, @@ -143,11 +151,11 @@ func Run(runOptions *options.ServerRunOptions, stopCh <-chan struct{}) error { if insecureServingOptions != nil { insecureHandlerChain := kubeserver.BuildInsecureHandlerChain(kubeAPIServer.GenericAPIServer.UnprotectedHandler(), kubeAPIServerConfig.GenericConfig) if err := kubeserver.NonBlockingRun(insecureServingOptions, insecureHandlerChain, stopCh); err != nil { - return err + return nil, err } } - return kubeAPIServer.GenericAPIServer.PrepareRun().Run(stopCh) + return kubeAPIServer.GenericAPIServer, nil } // otherwise go down the normal path of standing the aggregator up in front of the API 
server @@ -157,29 +165,29 @@ func Run(runOptions *options.ServerRunOptions, stopCh <-chan struct{}) error { // aggregator comes last in the chain aggregatorConfig, err := createAggregatorConfig(*kubeAPIServerConfig.GenericConfig, runOptions, versionedInformers, serviceResolver, proxyTransport) if err != nil { - return err + return nil, err } aggregatorConfig.ProxyTransport = proxyTransport aggregatorConfig.ServiceResolver = serviceResolver - aggregatorServer, err := createAggregatorServer(aggregatorConfig, kubeAPIServer.GenericAPIServer, sharedInformers, apiExtensionsServer.Informers) + aggregatorServer, err := createAggregatorServer(aggregatorConfig, kubeAPIServer.GenericAPIServer, apiExtensionsServer.Informers) if err != nil { // we don't need special handling for innerStopCh because the aggregator server doesn't create any go routines - return err + return nil, err } if insecureServingOptions != nil { insecureHandlerChain := kubeserver.BuildInsecureHandlerChain(aggregatorServer.GenericAPIServer.UnprotectedHandler(), kubeAPIServerConfig.GenericConfig) if err := kubeserver.NonBlockingRun(insecureServingOptions, insecureHandlerChain, stopCh); err != nil { - return err + return nil, err } } - return aggregatorServer.GenericAPIServer.PrepareRun().Run(stopCh) + return aggregatorServer.GenericAPIServer, nil } // CreateKubeAPIServer creates and wires a workable kube-apiserver -func CreateKubeAPIServer(kubeAPIServerConfig *master.Config, delegateAPIServer genericapiserver.DelegationTarget, sharedInformers informers.SharedInformerFactory, crdRESTOptionsGetter genericregistry.RESTOptionsGetter) (*master.Master, error) { - kubeAPIServer, err := kubeAPIServerConfig.Complete().New(delegateAPIServer, crdRESTOptionsGetter) +func CreateKubeAPIServer(kubeAPIServerConfig *master.Config, delegateAPIServer genericapiserver.DelegationTarget, sharedInformers informers.SharedInformerFactory) (*master.Master, error) { + kubeAPIServer, err := 
kubeAPIServerConfig.Complete().New(delegateAPIServer) if err != nil { return nil, err } @@ -256,8 +264,10 @@ func CreateKubeAPIServerConfig(s *options.ServerRunOptions, nodeTunneler tunnele return nil, nil, nil, nil, nil, err } - if err := utilwait.PollImmediate(etcdRetryInterval, etcdRetryLimit*etcdRetryInterval, preflight.EtcdConnection{ServerList: s.Etcd.StorageConfig.ServerList}.CheckEtcdServers); err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("error waiting for etcd connection: %v", err) + if _, port, err := net.SplitHostPort(s.Etcd.StorageConfig.ServerList[0]); err == nil && port != "0" && len(port) != 0 { + if err := utilwait.PollImmediate(etcdRetryInterval, etcdRetryLimit*etcdRetryInterval, preflight.EtcdConnection{ServerList: s.Etcd.StorageConfig.ServerList}.CheckEtcdServers); err != nil { + return nil, nil, nil, nil, nil, fmt.Errorf("error waiting for etcd connection: %v", err) + } } capabilities.Initialize(capabilities.Capabilities{ diff --git a/cmd/kube-apiserver/app/testing/BUILD b/cmd/kube-apiserver/app/testing/BUILD new file mode 100644 index 00000000000..d25a92a7d96 --- /dev/null +++ b/cmd/kube-apiserver/app/testing/BUILD @@ -0,0 +1,56 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["server_test.go"], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//vendor/k8s.io/api/apps/v1beta1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/networking/v1:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/dynamic:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = ["testserver.go"], + tags = ["automanaged"], + deps = [ + "//cmd/kube-apiserver/app:go_default_library", + "//cmd/kube-apiserver/app/options:go_default_library", + "//pkg/api:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/cmd/kube-apiserver/app/testing/server_test.go b/cmd/kube-apiserver/app/testing/server_test.go new file mode 100644 index 00000000000..8dc30f90f7b --- /dev/null +++ b/cmd/kube-apiserver/app/testing/server_test.go @@ -0,0 +1,231 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "fmt" + "testing" + "time" + + appsv1beta1 "k8s.io/api/apps/v1beta1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" +) + +func TestRun(t *testing.T) { + config, tearDown := StartTestServerOrDie(t) + defer tearDown() + + client, err := kubernetes.NewForConfig(config) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // test whether the server is really healthy after /healthz told us so + t.Logf("Creating Deployment directly after being healthy") + var replicas int32 = 1 + _, err = client.AppsV1beta1().Deployments("default").Create(&appsv1beta1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + Spec: appsv1beta1.DeploymentSpec{ + Replicas: &replicas, + Strategy: appsv1beta1.DeploymentStrategy{ + Type: appsv1beta1.RollingUpdateDeploymentStrategyType, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"foo": "bar"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "foo", + Image: "foo", + }, + }, + }, + }, + }, + }) + if err != nil { + t.Fatalf("Failed to create deployment: %v", err) + } +} + +func TestCRDShadowGroup(t *testing.T) { + config, tearDown := StartTestServerOrDie(t) + defer tearDown() + + kubeclient, err := kubernetes.NewForConfig(config) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + apiextensionsclient, err := apiextensionsclientset.NewForConfig(config) + if err != nil { + t.Fatalf("Unexpected error: 
%v", err) + } + + t.Logf("Creating a NetworkPolicy") + nwPolicy, err := kubeclient.NetworkingV1().NetworkPolicies("default").Create(&networkingv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, + Ingress: []networkingv1.NetworkPolicyIngressRule{}, + }, + }) + if err != nil { + t.Fatalf("Failed to create NetworkPolicy: %v", err) + } + + t.Logf("Trying to shadow networking group") + crd := &apiextensionsv1beta1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foos." + networkingv1.GroupName, + }, + Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ + Group: networkingv1.GroupName, + Version: networkingv1.SchemeGroupVersion.Version, + Scope: apiextensionsv1beta1.ClusterScoped, + Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ + Plural: "foos", + Kind: "Foo", + }, + }, + } + if _, err = apiextensionsclient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd); err != nil { + t.Fatalf("Failed to create networking group CRD: %v", err) + } + if err := waitForEstablishedCRD(apiextensionsclient, crd.Name); err != nil { + t.Fatalf("Failed to establish networking group CRD: %v", err) + } + // wait to give aggregator time to update + time.Sleep(2 * time.Second) + + t.Logf("Checking that we still see the NetworkPolicy") + _, err = kubeclient.NetworkingV1().NetworkPolicies(nwPolicy.Namespace).Get(nwPolicy.Name, metav1.GetOptions{}) + if err != nil { + t.Errorf("Failed to get NetworkPolocy: %v", err) + } + + t.Logf("Checking that crd resource does not show up in networking group") + found, err := crdExistsInDiscovery(apiextensionsclient, crd) + if err != nil { + t.Fatalf("unexpected discovery error: %v", err) + } + if found { + t.Errorf("CRD resource shows up in discovery, but shouldn't.") + } +} + +func TestCRD(t *testing.T) { + config, tearDown := 
StartTestServerOrDie(t) + defer tearDown() + + apiextensionsclient, err := apiextensionsclientset.NewForConfig(config) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + t.Logf("Trying to create a custom resource without conflict") + crd := &apiextensionsv1beta1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foos.cr.bar.com", + }, + Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ + Group: "cr.bar.com", + Version: "v1", + Scope: apiextensionsv1beta1.NamespaceScoped, + Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ + Plural: "foos", + Kind: "Foo", + }, + }, + } + if _, err = apiextensionsclient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd); err != nil { + t.Fatalf("Failed to create foos.cr.bar.com CRD; %v", err) + } + if err := waitForEstablishedCRD(apiextensionsclient, crd.Name); err != nil { + t.Fatalf("Failed to establish foos.cr.bar.com CRD: %v", err) + } + if err := wait.PollImmediate(500*time.Millisecond, 30*time.Second, func() (bool, error) { + return crdExistsInDiscovery(apiextensionsclient, crd) + }); err != nil { + t.Fatalf("Failed to see foos.cr.bar.com in discovery: %v", err) + } + + t.Logf("Trying to access foos.cr.bar.com with dynamic client") + barComConfig := *config + barComConfig.GroupVersion = &schema.GroupVersion{Group: "cr.bar.com", Version: "v1"} + barComConfig.APIPath = "/apis" + barComClient, err := dynamic.NewClient(&barComConfig) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + _, err = barComClient.Resource(&metav1.APIResource{Name: "foos", Namespaced: true}, "default").List(metav1.ListOptions{}) + if err != nil { + t.Errorf("Failed to list foos.cr.bar.com instances: %v", err) + } +} + +func waitForEstablishedCRD(client apiextensionsclientset.Interface, name string) error { + return wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { + crd, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, 
metav1.GetOptions{}) + if err != nil { + return false, err + } + for _, cond := range crd.Status.Conditions { + switch cond.Type { + case apiextensionsv1beta1.Established: + if cond.Status == apiextensionsv1beta1.ConditionTrue { + return true, err + } + case apiextensionsv1beta1.NamesAccepted: + if cond.Status == apiextensionsv1beta1.ConditionFalse { + fmt.Printf("Name conflict: %v\n", cond.Reason) + } + } + } + return false, nil + }) +} + +func crdExistsInDiscovery(client apiextensionsclientset.Interface, crd *apiextensionsv1beta1.CustomResourceDefinition) (bool, error) { + resourceList, err := client.Discovery().ServerResourcesForGroupVersion(crd.Spec.Group + "/" + crd.Spec.Version) + if err != nil { + return false, nil + } + for _, resource := range resourceList.APIResources { + if resource.Name == crd.Spec.Names.Plural { + return true, nil + } + } + return false, nil +} diff --git a/cmd/kube-apiserver/app/testing/testserver.go b/cmd/kube-apiserver/app/testing/testserver.go new file mode 100644 index 00000000000..d7cd9787609 --- /dev/null +++ b/cmd/kube-apiserver/app/testing/testserver.go @@ -0,0 +1,157 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "fmt" + "io/ioutil" + "net" + "os" + "strings" + "testing" + "time" + + "k8s.io/apimachinery/pkg/util/wait" + etcdtesting "k8s.io/apiserver/pkg/storage/etcd/testing" + "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" + "k8s.io/kubernetes/cmd/kube-apiserver/app" + "k8s.io/kubernetes/cmd/kube-apiserver/app/options" + "k8s.io/kubernetes/pkg/api" +) + +// TearDownFunc is to be called to tear down a test server. +type TearDownFunc func() + +// StartTestServer starts an etcd server and kube-apiserver. A rest client config and a tear-down func +// are returned. +// +// Note: we return a tear-down func instead of a stop channel because the latter would leak temporary +// files: Golang testing's call to os.Exit will not give a stop channel go routine +// enough time to remove temporary files. +func StartTestServer(t *testing.T) (result *restclient.Config, tearDownForCaller TearDownFunc, err error) { + var tmpDir string + var etcdServer *etcdtesting.EtcdTestServer + stopCh := make(chan struct{}) + tearDown := func() { + close(stopCh) + if etcdServer != nil { + etcdServer.Terminate(t) + } + if len(tmpDir) != 0 { + os.RemoveAll(tmpDir) + } + } + defer func() { + if tearDownForCaller == nil { + tearDown() + } + }() + + t.Logf("Starting etcd...") + etcdServer, storageConfig := etcdtesting.NewUnsecuredEtcd3TestClientServer(t, api.Scheme) + + tmpDir, err = ioutil.TempDir("", "kubernetes-kube-apiserver") + if err != nil { + return nil, nil, fmt.Errorf("Failed to create temp dir: %v", err) + } + + s := options.NewServerRunOptions() + s.InsecureServing.BindPort = 0 + s.SecureServing.BindPort = freePort() + s.SecureServing.ServerCert.CertDirectory = tmpDir + s.ServiceClusterIPRange.IP = net.IPv4(10, 0, 0, 0) + s.ServiceClusterIPRange.Mask = net.CIDRMask(16, 32) + s.Etcd.StorageConfig = *storageConfig + s.Etcd.DefaultStorageMediaType = "application/json" + s.Admission.PluginNames = 
strings.Split("Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds", ",") + + t.Logf("Starting kube-apiserver...") + runErrCh := make(chan error, 1) + server, err := app.CreateServerChain(s, stopCh) + if err != nil { + return nil, nil, fmt.Errorf("Failed to create server chain: %v", err) + } + go func(stopCh <-chan struct{}) { + if err := server.PrepareRun().Run(stopCh); err != nil { + t.Logf("kube-apiserver exited uncleanly: %v", err) + runErrCh <- err + } + }(stopCh) + + t.Logf("Waiting for /healthz to be ok...") + client, err := kubernetes.NewForConfig(server.LoopbackClientConfig) + if err != nil { + return nil, nil, fmt.Errorf("Failed to create a client: %v", err) + } + err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) { + select { + case err := <-runErrCh: + return false, err + default: + } + + result := client.CoreV1Client.RESTClient().Get().AbsPath("/healthz").Do() + status := 0 + result.StatusCode(&status) + if status == 200 { + return true, nil + } + return false, nil + }) + if err != nil { + return nil, nil, fmt.Errorf("Failed to wait for /healthz to return ok: %v", err) + } + + // from here the caller must call tearDown + return server.LoopbackClientConfig, tearDown, nil +} + +// StartTestServerOrDie calls StartTestServer with up to 5 retries on bind error and dies with +// t.Fatal if it does not succeed. +func StartTestServerOrDie(t *testing.T) (*restclient.Config, TearDownFunc) { + // retry test because the bind might fail due to a race with another process + // binding to the port. We cannot listen to :0 (then the kernel would give us + // a port which is free for sure), so we need this workaround. 
+ for retry := 0; retry < 5 && !t.Failed(); retry++ { + config, td, err := StartTestServer(t) + if err == nil { + return config, td + } + if err != nil && !strings.Contains(err.Error(), "bind") { + break + } + t.Logf("Bind error, retrying...") + } + + t.Fatalf("Failed to launch server") + return nil, nil +} + +func freePort() int { + addr, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + panic(err) + } + + l, err := net.ListenTCP("tcp", addr) + if err != nil { + panic(err) + } + defer l.Close() + return l.Addr().(*net.TCPAddr).Port +} diff --git a/cmd/kube-controller-manager/OWNERS b/cmd/kube-controller-manager/OWNERS index 9bb16d17821..03b1a794896 100644 --- a/cmd/kube-controller-manager/OWNERS +++ b/cmd/kube-controller-manager/OWNERS @@ -5,7 +5,6 @@ approvers: reviewers: - '249043822' - a-robinson -- bprashanth - brendandburns - caesarxuchao - cjcullen diff --git a/cmd/kube-controller-manager/app/BUILD b/cmd/kube-controller-manager/app/BUILD index 6ca5640a9a4..4003b04fce1 100644 --- a/cmd/kube-controller-manager/app/BUILD +++ b/cmd/kube-controller-manager/app/BUILD @@ -30,8 +30,6 @@ go_library( "//pkg/apis/componentconfig:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/informers/informers_generated/externalversions:go_default_library", - "//pkg/client/leaderelection:go_default_library", - "//pkg/client/leaderelection/resourcelock:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers:go_default_library", "//pkg/cloudprovider/providers/aws:go_default_library", @@ -105,9 +103,12 @@ go_library( "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/discovery:go_default_library", "//vendor/k8s.io/client-go/dynamic:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", 
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", + "//vendor/k8s.io/client-go/tools/leaderelection:go_default_library", + "//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/client-go/util/cert:go_default_library", "//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1:go_default_library", diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index e9a54a37296..9147a1130d9 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -40,18 +40,19 @@ import ( clientv1 "k8s.io/api/core/v1" "k8s.io/client-go/discovery" + "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/record" certutil "k8s.io/client-go/util/cert" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/kubernetes/cmd/kube-controller-manager/app/options" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions" - "k8s.io/kubernetes/pkg/client/leaderelection" - "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller" serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount" @@ -127,7 +128,7 @@ func Run(s *options.CMServer) error { if err != nil { glog.Fatalf("Invalid API configuration: %v", err) } - leaderElectionClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election")) + leaderElectionClient := kubernetes.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election")) go func() { mux := 
http.NewServeMux() diff --git a/cmd/kube-controller-manager/app/extensions.go b/cmd/kube-controller-manager/app/extensions.go index f1b65440799..7e9be6a87e3 100644 --- a/cmd/kube-controller-manager/app/extensions.go +++ b/cmd/kube-controller-manager/app/extensions.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/controller/daemon" "k8s.io/kubernetes/pkg/controller/deployment" - replicaset "k8s.io/kubernetes/pkg/controller/replicaset" + "k8s.io/kubernetes/pkg/controller/replicaset" ) func startDaemonSetController(ctx ControllerContext) (bool, error) { diff --git a/cmd/kube-controller-manager/app/options/BUILD b/cmd/kube-controller-manager/app/options/BUILD index 95a03df4fe9..39f587d08a7 100644 --- a/cmd/kube-controller-manager/app/options/BUILD +++ b/cmd/kube-controller-manager/app/options/BUILD @@ -13,7 +13,7 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/apis/componentconfig:go_default_library", - "//pkg/client/leaderelection:go_default_library", + "//pkg/client/leaderelectionconfig:go_default_library", "//pkg/controller/garbagecollector:go_default_library", "//pkg/features:go_default_library", "//pkg/master/ports:go_default_library", diff --git a/cmd/kube-controller-manager/app/options/options.go b/cmd/kube-controller-manager/app/options/options.go index dec8d1f1adc..f7754b381d2 100644 --- a/cmd/kube-controller-manager/app/options/options.go +++ b/cmd/kube-controller-manager/app/options/options.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/apis/componentconfig" - "k8s.io/kubernetes/pkg/client/leaderelection" + "k8s.io/kubernetes/pkg/client/leaderelectionconfig" "k8s.io/kubernetes/pkg/controller/garbagecollector" "k8s.io/kubernetes/pkg/master/ports" @@ -106,7 +106,7 @@ func NewCMServer() *CMServer { ContentType: "application/vnd.kubernetes.protobuf", KubeAPIQPS: 20.0, KubeAPIBurst: 30, - LeaderElection: 
leaderelection.DefaultLeaderElectionConfiguration(), + LeaderElection: leaderelectionconfig.DefaultLeaderElectionConfiguration(), ControllerStartInterval: metav1.Duration{Duration: 0 * time.Second}, EnableGarbageCollector: true, ConcurrentGCSyncs: 20, @@ -225,9 +225,9 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet, allControllers []string, disabled fs.BoolVar(&s.DisableAttachDetachReconcilerSync, "disable-attach-detach-reconcile-sync", false, "Disable volume attach detach reconciler sync. Disabling this may cause volumes to be mismatched with pods. Use wisely.") fs.DurationVar(&s.ReconcilerSyncLoopPeriod.Duration, "attach-detach-reconcile-sync-period", s.ReconcilerSyncLoopPeriod.Duration, "The reconciler sync wait time between volume attach detach. This duration must be larger than one second, and increasing this value from the default may allow for volumes to be mismatched with pods.") fs.BoolVar(&s.EnableTaintManager, "enable-taint-manager", s.EnableTaintManager, "WARNING: Beta feature. If set to true enables NoExecute Taints and will evict all not-tolerating Pod running on Nodes tainted with this kind of Taints.") - fs.BoolVar(&s.HorizontalPodAutoscalerUseRESTClients, "horizontal-pod-autoscaler-use-rest-clients", s.HorizontalPodAutoscalerUseRESTClients, "WARNING: alpha feature. If set to true, causes the horizontal pod autoscaler controller to use REST clients through the kube-aggregator, instead of using the legacy metrics client through the API server proxy. This is required for custom metrics support in the horizonal pod autoscaler.") + fs.BoolVar(&s.HorizontalPodAutoscalerUseRESTClients, "horizontal-pod-autoscaler-use-rest-clients", s.HorizontalPodAutoscalerUseRESTClients, "WARNING: alpha feature. If set to true, causes the horizontal pod autoscaler controller to use REST clients through the kube-aggregator, instead of using the legacy metrics client through the API server proxy. 
This is required for custom metrics support in the horizontal pod autoscaler.") - leaderelection.BindFlags(&s.LeaderElection, fs) + leaderelectionconfig.BindFlags(&s.LeaderElection, fs) utilfeature.DefaultFeatureGate.AddFlag(fs) } diff --git a/cmd/kube-proxy/app/BUILD b/cmd/kube-proxy/app/BUILD index 2398096dbba..93683c5ecd4 100644 --- a/cmd/kube-proxy/app/BUILD +++ b/cmd/kube-proxy/app/BUILD @@ -23,6 +23,7 @@ go_library( "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", "//pkg/kubelet/qos:go_default_library", + "//pkg/master/ports:go_default_library", "//pkg/proxy:go_default_library", "//pkg/proxy/config:go_default_library", "//pkg/proxy/healthcheck:go_default_library", @@ -72,10 +73,13 @@ go_test( deps = [ "//pkg/api:go_default_library", "//pkg/apis/componentconfig:go_default_library", + "//pkg/apis/componentconfig/v1alpha1:go_default_library", "//pkg/util:go_default_library", + "//pkg/util/configz:go_default_library", "//pkg/util/iptables:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", ], ) diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index 45bb03de73e..9ad009c2454 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -53,6 +53,7 @@ import ( informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubelet/qos" + "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/proxy" proxyconfig "k8s.io/kubernetes/pkg/proxy/config" "k8s.io/kubernetes/pkg/proxy/healthcheck" @@ -129,6 +130,7 @@ func AddFlags(options *Options, fs *pflag.FlagSet) { fs.StringVar(&options.master, "master", options.master, "The 
address of the Kubernetes API server (overrides any value in kubeconfig)") fs.Int32Var(&options.healthzPort, "healthz-port", options.healthzPort, "The port to bind the health check server. Use 0 to disable.") fs.Var(componentconfig.IPVar{Val: &options.config.HealthzBindAddress}, "healthz-bind-address", "The IP address and port for the health check server to serve on (set to 0.0.0.0 for all interfaces)") + fs.Var(componentconfig.IPVar{Val: &options.config.MetricsBindAddress}, "metrics-bind-address", "The IP address and port for the metrics server to serve on (set to 0.0.0.0 for all interfaces)") fs.Int32Var(options.config.OOMScoreAdj, "oom-score-adj", util.Int32PtrDerefOr(options.config.OOMScoreAdj, int32(qos.KubeProxyOOMScoreAdj)), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]") fs.StringVar(&options.config.ResourceContainer, "resource-container", options.config.ResourceContainer, "Absolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).") fs.MarkDeprecated("resource-container", "This feature will be removed in a later release.") @@ -166,7 +168,7 @@ func AddFlags(options *Options, fs *pflag.FlagSet) { func NewOptions() (*Options, error) { o := &Options{ config: new(componentconfig.KubeProxyConfiguration), - healthzPort: 10256, + healthzPort: ports.ProxyHealthzPort, } o.scheme = runtime.NewScheme() @@ -447,7 +449,7 @@ func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndEx // We omit creation of pretty much everything if we run in cleanup mode if cleanupAndExit { - return &ProxyServer{IptInterface: iptInterface}, nil + return &ProxyServer{IptInterface: iptInterface, CleanupAndExit: cleanupAndExit}, nil } client, eventClient, err := createClients(config.ClientConnection, master) @@ -627,7 +629,9 @@ func (s *ProxyServer) Run() error { } } - s.Broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: s.EventClient.Events("")}) + if 
s.Broadcaster != nil && s.EventClient != nil { + s.Broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: s.EventClient.Events("")}) + } // Start up a healthz server if requested if s.HealthzServer != nil { diff --git a/cmd/kube-proxy/app/server_test.go b/cmd/kube-proxy/app/server_test.go index b7337fe6c2b..673ae0ae79b 100644 --- a/cmd/kube-proxy/app/server_test.go +++ b/cmd/kube-proxy/app/server_test.go @@ -17,6 +17,7 @@ limitations under the License. package app import ( + "errors" "fmt" "reflect" "runtime" @@ -27,10 +28,13 @@ import ( "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sRuntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/diff" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/componentconfig" + "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/configz" "k8s.io/kubernetes/pkg/util/iptables" ) @@ -134,23 +138,67 @@ func Test_getProxyMode(t *testing.T) { } } -// This test verifies that Proxy Server does not crash when CleanupAndExit is true. 
+// TestNewOptionsFailures tests failure modes for NewOptions() +func TestNewOptionsFailures(t *testing.T) { + + // Create a fake scheme builder that generates an error + errString := fmt.Sprintf("Simulated error") + genError := func(scheme *k8sRuntime.Scheme) error { + return errors.New(errString) + } + fakeSchemeBuilder := k8sRuntime.NewSchemeBuilder(genError) + + simulatedErrorTest := func(target string) { + var addToScheme *func(s *k8sRuntime.Scheme) error + if target == "componentconfig" { + addToScheme = &componentconfig.AddToScheme + } else { + addToScheme = &v1alpha1.AddToScheme + } + restoreValue := *addToScheme + restore := func() { + *addToScheme = restoreValue + } + defer restore() + *addToScheme = fakeSchemeBuilder.AddToScheme + _, err := NewOptions() + assert.Error(t, err, fmt.Sprintf("Simulated error in component %s", target)) + } + + // Simulate errors in calls to AddToScheme() + faultTargets := []string{"componentconfig", "v1alpha1"} + for _, target := range faultTargets { + simulatedErrorTest(target) + } +} + +// This test verifies that NewProxyServer does not crash when CleanupAndExit is true. 
func TestProxyServerWithCleanupAndExit(t *testing.T) { - options, err := NewOptions() - if err != nil { - t.Fatal(err) + // Each bind address below is a separate test case + bindAddresses := []string{ + "0.0.0.0", + "2001:db8::1", } + for _, addr := range bindAddresses { + options, err := NewOptions() + if err != nil { + t.Fatalf("Unexpected error with address %s: %v", addr, err) + } - options.config = &componentconfig.KubeProxyConfiguration{ - BindAddress: "0.0.0.0", + options.config = &componentconfig.KubeProxyConfiguration{ + BindAddress: addr, + } + options.CleanupAndExit = true + + proxyserver, err := NewProxyServer(options.config, options.CleanupAndExit, options.scheme, options.master) + + assert.Nil(t, err, "unexpected error in NewProxyServer, addr: %s", addr) + assert.NotNil(t, proxyserver, "nil proxy server obj, addr: %s", addr) + assert.NotNil(t, proxyserver.IptInterface, "nil iptables intf, addr: %s", addr) + + // Clean up config for next test case + configz.Delete("componentconfig") } - options.CleanupAndExit = true - - proxyserver, err := NewProxyServer(options.config, options.CleanupAndExit, options.scheme, options.master) - - assert.Nil(t, err) - assert.NotNil(t, proxyserver) - assert.NotNil(t, proxyserver.IptInterface) } func TestGetConntrackMax(t *testing.T) { @@ -211,16 +259,18 @@ func TestGetConntrackMax(t *testing.T) { } } +// TestLoadConfig tests proper operation of loadConfig() func TestLoadConfig(t *testing.T) { - yaml := `apiVersion: componentconfig/v1alpha1 -bindAddress: 9.8.7.6 + + yamlTemplate := `apiVersion: componentconfig/v1alpha1 +bindAddress: %s clientConnection: acceptContentTypes: "abc" burst: 100 contentType: content-type kubeconfig: "/path/to/kubeconfig" qps: 7 -clusterCIDR: "1.2.3.0/24" +clusterCIDR: "%s" configSyncPeriod: 15s conntrack: max: 4 @@ -229,7 +279,7 @@ conntrack: tcpCloseWaitTimeout: 10s tcpEstablishedTimeout: 20s featureGates: "all" -healthzBindAddress: 1.2.3.4:12345 +healthzBindAddress: "%s" hostnameOverride: "foo" 
iptables: masqueradeAll: true @@ -237,7 +287,7 @@ iptables: minSyncPeriod: 10s syncPeriod: 60s kind: KubeProxyConfiguration -metricsBindAddress: 2.3.4.5:23456 +metricsBindAddress: "%s" mode: "iptables" oomScoreAdj: 17 portRange: "2-7" @@ -245,47 +295,104 @@ resourceContainer: /foo udpTimeoutMilliseconds: 123ms ` - expected := &componentconfig.KubeProxyConfiguration{ - BindAddress: "9.8.7.6", - ClientConnection: componentconfig.ClientConnectionConfiguration{ - AcceptContentTypes: "abc", - Burst: 100, - ContentType: "content-type", - KubeConfigFile: "/path/to/kubeconfig", - QPS: 7, + testCases := []struct { + name string + bindAddress string + clusterCIDR string + healthzBindAddress string + metricsBindAddress string + }{ + { + name: "IPv4 config", + bindAddress: "9.8.7.6", + clusterCIDR: "1.2.3.0/24", + healthzBindAddress: "1.2.3.4:12345", + metricsBindAddress: "2.3.4.5:23456", }, - ClusterCIDR: "1.2.3.0/24", - ConfigSyncPeriod: metav1.Duration{Duration: 15 * time.Second}, - Conntrack: componentconfig.KubeProxyConntrackConfiguration{ - Max: 4, - MaxPerCore: 2, - Min: 1, - TCPCloseWaitTimeout: metav1.Duration{Duration: 10 * time.Second}, - TCPEstablishedTimeout: metav1.Duration{Duration: 20 * time.Second}, + { + name: "IPv6 config", + bindAddress: "2001:db8::1", + clusterCIDR: "fd00:1::0/64", + healthzBindAddress: "[fd00:1::5]:12345", + metricsBindAddress: "[fd00:2::5]:23456", }, - FeatureGates: "all", - HealthzBindAddress: "1.2.3.4:12345", - HostnameOverride: "foo", - IPTables: componentconfig.KubeProxyIPTablesConfiguration{ - MasqueradeAll: true, - MasqueradeBit: util.Int32Ptr(17), - MinSyncPeriod: metav1.Duration{Duration: 10 * time.Second}, - SyncPeriod: metav1.Duration{Duration: 60 * time.Second}, - }, - MetricsBindAddress: "2.3.4.5:23456", - Mode: "iptables", - OOMScoreAdj: util.Int32Ptr(17), - PortRange: "2-7", - ResourceContainer: "/foo", - UDPIdleTimeout: metav1.Duration{Duration: 123 * time.Millisecond}, } - options, err := NewOptions() - assert.NoError(t, 
err) + for _, tc := range testCases { + expected := &componentconfig.KubeProxyConfiguration{ + BindAddress: tc.bindAddress, + ClientConnection: componentconfig.ClientConnectionConfiguration{ + AcceptContentTypes: "abc", + Burst: 100, + ContentType: "content-type", + KubeConfigFile: "/path/to/kubeconfig", + QPS: 7, + }, + ClusterCIDR: tc.clusterCIDR, + ConfigSyncPeriod: metav1.Duration{Duration: 15 * time.Second}, + Conntrack: componentconfig.KubeProxyConntrackConfiguration{ + Max: 4, + MaxPerCore: 2, + Min: 1, + TCPCloseWaitTimeout: metav1.Duration{Duration: 10 * time.Second}, + TCPEstablishedTimeout: metav1.Duration{Duration: 20 * time.Second}, + }, + FeatureGates: "all", + HealthzBindAddress: tc.healthzBindAddress, + HostnameOverride: "foo", + IPTables: componentconfig.KubeProxyIPTablesConfiguration{ + MasqueradeAll: true, + MasqueradeBit: util.Int32Ptr(17), + MinSyncPeriod: metav1.Duration{Duration: 10 * time.Second}, + SyncPeriod: metav1.Duration{Duration: 60 * time.Second}, + }, + MetricsBindAddress: tc.metricsBindAddress, + Mode: "iptables", + OOMScoreAdj: util.Int32Ptr(17), + PortRange: "2-7", + ResourceContainer: "/foo", + UDPIdleTimeout: metav1.Duration{Duration: 123 * time.Millisecond}, + } - config, err := options.loadConfig([]byte(yaml)) - assert.NoError(t, err) - if !reflect.DeepEqual(expected, config) { - t.Fatalf("unexpected config, diff = %s", diff.ObjectDiff(config, expected)) + options, err := NewOptions() + assert.NoError(t, err, "unexpected error for %s: %v", tc.name, err) + + yaml := fmt.Sprintf( + yamlTemplate, tc.bindAddress, tc.clusterCIDR, + tc.healthzBindAddress, tc.metricsBindAddress) + config, err := options.loadConfig([]byte(yaml)) + assert.NoError(t, err, "unexpected error for %s: %v", tc.name, err) + if !reflect.DeepEqual(expected, config) { + t.Fatalf("unexpected config for %s test, diff = %s", tc.name, diff.ObjectDiff(config, expected)) + } + } +} + +// TestLoadConfigFailures tests failure modes for loadConfig() +func 
TestLoadConfigFailures(t *testing.T) { + testCases := []struct { + name string + config string + expErr string + }{ + { + name: "Decode error test", + config: "Twas bryllyg, and ye slythy toves", + expErr: "could not find expected ':'", + }, + { + name: "Bad config type test", + config: "kind: KubeSchedulerConfiguration", + expErr: "unexpected config type", + }, + } + version := "apiVersion: componentconfig/v1alpha1" + for _, tc := range testCases { + options, _ := NewOptions() + config := fmt.Sprintf("%s\n%s", version, tc.config) + _, err := options.loadConfig([]byte(config)) + if assert.Error(t, err, tc.name) { + assert.Contains(t, err.Error(), tc.expErr, tc.name) + } } } diff --git a/cmd/kubeadm/OWNERS b/cmd/kubeadm/OWNERS index 7987f09f4d0..77bb47edbcf 100644 --- a/cmd/kubeadm/OWNERS +++ b/cmd/kubeadm/OWNERS @@ -12,3 +12,4 @@ reviewers: - lukemarsden - dmmcquay - krousey +- timothysc diff --git a/cmd/kubeadm/app/BUILD b/cmd/kubeadm/app/BUILD index 86cd8c4b48a..457e8b42e37 100644 --- a/cmd/kubeadm/app/BUILD +++ b/cmd/kubeadm/app/BUILD @@ -35,12 +35,13 @@ filegroup( "//cmd/kubeadm/app/constants:all-srcs", "//cmd/kubeadm/app/discovery:all-srcs", "//cmd/kubeadm/app/images:all-srcs", - "//cmd/kubeadm/app/master:all-srcs", "//cmd/kubeadm/app/node:all-srcs", "//cmd/kubeadm/app/phases/addons:all-srcs", "//cmd/kubeadm/app/phases/apiconfig:all-srcs", "//cmd/kubeadm/app/phases/certs:all-srcs", + "//cmd/kubeadm/app/phases/controlplane:all-srcs", "//cmd/kubeadm/app/phases/kubeconfig:all-srcs", + "//cmd/kubeadm/app/phases/selfhosting:all-srcs", "//cmd/kubeadm/app/phases/token:all-srcs", "//cmd/kubeadm/app/preflight:all-srcs", "//cmd/kubeadm/app/util:all-srcs", diff --git a/cmd/kubeadm/app/apis/kubeadm/BUILD b/cmd/kubeadm/app/apis/kubeadm/BUILD index a4241545803..7bcc07f6104 100644 --- a/cmd/kubeadm/app/apis/kubeadm/BUILD +++ b/cmd/kubeadm/app/apis/kubeadm/BUILD @@ -14,7 +14,6 @@ go_library( "env.go", "register.go", "types.go", - "well_known_labels.go", ], tags = 
["automanaged"], deps = [ diff --git a/cmd/kubeadm/app/apis/kubeadm/env.go b/cmd/kubeadm/app/apis/kubeadm/env.go index ab97cdbbf1a..415602a653e 100644 --- a/cmd/kubeadm/app/apis/kubeadm/env.go +++ b/cmd/kubeadm/app/apis/kubeadm/env.go @@ -25,15 +25,10 @@ import ( var GlobalEnvParams = SetEnvParams() -// TODO(phase1+) Move these paramaters to the API group -// we need some params for testing etc, let's keep these hidden for now func SetEnvParams() *EnvParams { envParams := map[string]string{ - "kubernetes_dir": "/etc/kubernetes", - "hyperkube_image": "", - "repo_prefix": "gcr.io/google_containers", - "etcd_image": "", + "kubernetes_dir": "/etc/kubernetes", } for k := range envParams { @@ -43,9 +38,6 @@ func SetEnvParams() *EnvParams { } return &EnvParams{ - KubernetesDir: path.Clean(envParams["kubernetes_dir"]), - HyperkubeImage: envParams["hyperkube_image"], - RepositoryPrefix: envParams["repo_prefix"], - EtcdImage: envParams["etcd_image"], + KubernetesDir: path.Clean(envParams["kubernetes_dir"]), } } diff --git a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go index c1fd7218169..e30f2c3e09e 100644 --- a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go +++ b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go @@ -36,7 +36,10 @@ func KubeadmFuzzerFuncs(t apitesting.TestingCommon) []interface{} { obj.CertificatesDir = "foo" obj.APIServerCertSANs = []string{} obj.Token = "foo" + obj.Etcd.Image = "foo" obj.Etcd.DataDir = "foo" + obj.ImageRepository = "foo" + obj.UnifiedControlPlaneImage = "foo" }, func(obj *kubeadm.NodeConfiguration, c fuzz.Continue) { c.FuzzNoCustom(obj) diff --git a/cmd/kubeadm/app/apis/kubeadm/types.go b/cmd/kubeadm/app/apis/kubeadm/types.go index d5885fbd6bb..aef57c56d43 100644 --- a/cmd/kubeadm/app/apis/kubeadm/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/types.go @@ -23,10 +23,7 @@ import ( ) type EnvParams struct { - KubernetesDir string - HyperkubeImage string - RepositoryPrefix string - EtcdImage string + 
KubernetesDir string } type MasterConfiguration struct { @@ -37,6 +34,7 @@ type MasterConfiguration struct { Networking Networking KubernetesVersion string CloudProvider string + NodeName string AuthorizationModes []string Token string @@ -55,6 +53,11 @@ type MasterConfiguration struct { APIServerCertSANs []string // CertificatesDir specifies where to store or look for all required certificates CertificatesDir string + + // ImageRepository what container registry to pull control plane images from + ImageRepository string + // UnifiedControlPlaneImage specifies if a specific container image should be used for all control plane components + UnifiedControlPlaneImage string } type API struct { @@ -83,6 +86,8 @@ type Etcd struct { KeyFile string DataDir string ExtraArgs map[string]string + // Image specifies which container image to use for running etcd. If empty, automatically populated by kubeadm using the image repository and default etcd version + Image string } type NodeConfiguration struct { @@ -93,6 +98,7 @@ type NodeConfiguration struct { DiscoveryToken string // Currently we only pay attention to one api server but hope to support >1 in the future DiscoveryTokenAPIServers []string + NodeName string TLSBootstrapToken string Token string } diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go index a4bd946208c..ee67d3ae3bb 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go @@ -33,6 +33,7 @@ const ( DefaultCACertPath = "/etc/kubernetes/pki/ca.crt" DefaultCertificatesDir = "/etc/kubernetes/pki" DefaultEtcdDataDir = "/var/lib/etcd" + DefaultImageRepository = "gcr.io/google_containers" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { @@ -68,6 +69,10 @@ func SetDefaults_MasterConfiguration(obj *MasterConfiguration) { obj.TokenTTL = constants.DefaultTokenDuration } + if obj.ImageRepository == "" { + obj.ImageRepository = 
DefaultImageRepository + } + if obj.Etcd.DataDir == "" { obj.Etcd.DataDir = DefaultEtcdDataDir } diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go index b555cd3fa36..0962ca93f63 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go @@ -30,6 +30,7 @@ type MasterConfiguration struct { Networking Networking `json:"networking"` KubernetesVersion string `json:"kubernetesVersion"` CloudProvider string `json:"cloudProvider"` + NodeName string `json:"nodeName"` AuthorizationModes []string `json:"authorizationModes"` Token string `json:"token"` @@ -48,6 +49,11 @@ type MasterConfiguration struct { APIServerCertSANs []string `json:"apiServerCertSANs"` // CertificatesDir specifies where to store or look for all required certificates CertificatesDir string `json:"certificatesDir"` + + // ImageRepository what container registry to pull control plane images from + ImageRepository string `json:"imageRepository"` + // UnifiedControlPlaneImage specifies if a specific container image should be used for all control plane components + UnifiedControlPlaneImage string `json:"unifiedControlPlaneImage"` } type API struct { @@ -76,6 +82,8 @@ type Etcd struct { KeyFile string `json:"keyFile"` DataDir string `json:"dataDir"` ExtraArgs map[string]string `json:"extraArgs"` + // Image specifies which container image to use for running etcd. 
If empty, automatically populated by kubeadm using the image repository and default etcd version + Image string `json:"image"` } type NodeConfiguration struct { @@ -85,6 +93,7 @@ type NodeConfiguration struct { DiscoveryFile string `json:"discoveryFile"` DiscoveryToken string `json:"discoveryToken"` DiscoveryTokenAPIServers []string `json:"discoveryTokenAPIServers"` + NodeName string `json:"nodeName"` TLSBootstrapToken string `json:"tlsBootstrapToken"` Token string `json:"token"` } diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/BUILD b/cmd/kubeadm/app/apis/kubeadm/validation/BUILD index 5ff38a39453..797d7a7daed 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/BUILD +++ b/cmd/kubeadm/app/apis/kubeadm/validation/BUILD @@ -15,6 +15,7 @@ go_test( tags = ["automanaged"], deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", ], ) @@ -30,6 +31,8 @@ go_library( "//pkg/api/validation:go_default_library", "//pkg/kubeapiserver/authorizer/modes:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", + "//pkg/util/node:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", ], diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go index ad3ecf674cc..f01aae212d1 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go @@ -24,6 +24,8 @@ import ( "path/filepath" "strings" + "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" @@ -32,6 +34,7 @@ import ( apivalidation 
"k8s.io/kubernetes/pkg/api/validation" authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" + "k8s.io/kubernetes/pkg/util/node" ) // TODO: Break out the cloudprovider functionality out of core and only support the new flow @@ -41,7 +44,6 @@ var cloudproviders = []string{ "azure", "cloudstack", "gce", - "mesos", "openstack", "ovirt", "photon", @@ -62,6 +64,7 @@ func ValidateMasterConfiguration(c *kubeadm.MasterConfiguration) field.ErrorList allErrs = append(allErrs, ValidateNetworking(&c.Networking, field.NewPath("networking"))...) allErrs = append(allErrs, ValidateAPIServerCertSANs(c.APIServerCertSANs, field.NewPath("cert-altnames"))...) allErrs = append(allErrs, ValidateAbsolutePath(c.CertificatesDir, field.NewPath("certificates-dir"))...) + allErrs = append(allErrs, ValidateNodeName(c.NodeName, field.NewPath("node-name"))...) allErrs = append(allErrs, ValidateToken(c.Token, field.NewPath("token"))...) return allErrs } @@ -236,6 +239,14 @@ func ValidateAbsolutePath(path string, fldPath *field.Path) field.ErrorList { return allErrs } +func ValidateNodeName(nodename string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if node.GetHostname(nodename) != nodename { + allErrs = append(allErrs, field.Invalid(fldPath, nodename, "nodename is not valid, must be lower case")) + } + return allErrs +} + func ValidateCloudProvider(provider string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(provider) == 0 { @@ -249,3 +260,10 @@ func ValidateCloudProvider(provider string, fldPath *field.Path) field.ErrorList allErrs = append(allErrs, field.Invalid(fldPath, provider, "cloudprovider not supported")) return allErrs } + +func ValidateMixedArguments(flag *pflag.FlagSet) error { + if flag.Changed("config") && flag.NFlag() != 1 { + return fmt.Errorf("can not mix '--config' with other arguments") + } + return nil +} diff --git 
a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go index e892ab3721f..dedaf6267a8 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go @@ -19,6 +19,8 @@ package validation import ( "testing" + "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" ) @@ -76,6 +78,29 @@ func TestValidateAuthorizationModes(t *testing.T) { } } +func TestValidateNodeName(t *testing.T) { + var tests = []struct { + s string + f *field.Path + expected bool + }{ + {"", nil, false}, // ok if not provided + {"1234", nil, true}, // supported + {"valid-nodename", nil, true}, // supported + {"INVALID-NODENAME", nil, false}, // Upper cases is invalid + } + for _, rt := range tests { + actual := ValidateNodeName(rt.s, rt.f) + if (len(actual) == 0) != rt.expected { + t.Errorf( + "failed ValidateNodeName:\n\texpected: %t\n\t actual: %t", + rt.expected, + (len(actual) == 0), + ) + } + } +} + func TestValidateCloudProvider(t *testing.T) { var tests = []struct { s string @@ -175,6 +200,7 @@ func TestValidateIPNetFromString(t *testing.T) { } func TestValidateMasterConfiguration(t *testing.T) { + nodename := "valid-nodename" var tests = []struct { s *kubeadm.MasterConfiguration expected bool @@ -187,6 +213,7 @@ func TestValidateMasterConfiguration(t *testing.T) { DNSDomain: "cluster.local", }, CertificatesDir: "/some/cert/dir", + NodeName: nodename, }, false}, {&kubeadm.MasterConfiguration{ AuthorizationModes: []string{"Node", "RBAC"}, @@ -196,6 +223,7 @@ func TestValidateMasterConfiguration(t *testing.T) { }, CertificatesDir: "/some/other/cert/dir", Token: "abcdef.0123456789abcdef", + NodeName: nodename, }, true}, {&kubeadm.MasterConfiguration{ AuthorizationModes: []string{"Node", "RBAC"}, @@ -204,6 +232,7 @@ func TestValidateMasterConfiguration(t *testing.T) { DNSDomain: 
"cluster.local", }, CertificatesDir: "/some/cert/dir", + NodeName: nodename, }, false}, {&kubeadm.MasterConfiguration{ AuthorizationModes: []string{"Node", "RBAC"}, @@ -213,6 +242,7 @@ func TestValidateMasterConfiguration(t *testing.T) { }, CertificatesDir: "/some/other/cert/dir", Token: "abcdef.0123456789abcdef", + NodeName: nodename, }, true}, } for _, rt := range tests { @@ -250,3 +280,42 @@ func TestValidateNodeConfiguration(t *testing.T) { } } } + +func TestValidateMixedArguments(t *testing.T) { + var tests = []struct { + args []string + expected bool + }{ + {[]string{"--foo=bar"}, true}, + {[]string{"--config=hello"}, true}, + {[]string{"--foo=bar", "--config=hello"}, false}, + } + + var cfgPath string + var skipPreFlight bool + + for _, rt := range tests { + f := pflag.NewFlagSet("test", pflag.ContinueOnError) + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + f.String("foo", "", "string value") + f.StringVar(&cfgPath, "config", cfgPath, "Path to kubeadm config file") + f.BoolVar( + &skipPreFlight, "skip-preflight-checks", skipPreFlight, + "Skip preflight checks normally run before modifying the system", + ) + if err := f.Parse(rt.args); err != nil { + t.Fatal(err) + } + + actual := ValidateMixedArguments(f) + if (actual == nil) != rt.expected { + t.Errorf( + "failed ValidateMixedArguments:\n\texpected: %t\n\t actual: %t", + rt.expected, + (actual == nil), + ) + } + } +} diff --git a/cmd/kubeadm/app/apis/kubeadm/well_known_labels.go b/cmd/kubeadm/app/apis/kubeadm/well_known_labels.go deleted file mode 100644 index f2fb530fdc0..00000000000 --- a/cmd/kubeadm/app/apis/kubeadm/well_known_labels.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubeadm - -// Role labels are applied to Nodes to mark their purpose. In particular, we -// usually want to distinguish the master, so that we can isolate privileged -// pods and operations. -// -// Originally we relied on not registering the master, on the fact that the -// master was Unschedulable, and on static manifests for master components. -// But we now do register masters in many environments, are generally moving -// away from static manifests (for better manageability), and working towards -// deprecating the unschedulable field (replacing it with taints & tolerations -// instead). -// -// Even with tainting, a label remains the easiest way of making a positive -// selection, so that pods can schedule only to master nodes for example, and -// thus installations will likely define a label for their master nodes. -// -// So that we can recognize master nodes in consequent places though (such as -// kubectl get nodes), we encourage installations to use the well-known labels. -// We define NodeLabelRole, which is the preferred form, but we will also recognize -// other forms that are known to be in widespread use (NodeLabelKubeadmAlphaRole). - -const ( - // NodeLabelKubeadmAlphaRole is a label that kubeadm applies to a Node as a hint that it has a particular purpose. - // Use of NodeLabelRole is preferred. 
- NodeLabelKubeadmAlphaRole = "kubeadm.alpha.kubernetes.io/role" -) diff --git a/cmd/kubeadm/app/cmd/BUILD b/cmd/kubeadm/app/cmd/BUILD index be4434e1891..807054c4b51 100644 --- a/cmd/kubeadm/app/cmd/BUILD +++ b/cmd/kubeadm/app/cmd/BUILD @@ -13,7 +13,6 @@ go_library( srcs = [ "cmd.go", "completion.go", - "defaults.go", "init.go", "join.go", "reset.go", @@ -28,15 +27,16 @@ go_library( "//cmd/kubeadm/app/cmd/phases:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/discovery:go_default_library", - "//cmd/kubeadm/app/master:go_default_library", "//cmd/kubeadm/app/node:go_default_library", "//cmd/kubeadm/app/phases/addons:go_default_library", "//cmd/kubeadm/app/phases/apiconfig:go_default_library", - "//cmd/kubeadm/app/phases/certs:go_default_library", + "//cmd/kubeadm/app/phases/controlplane:go_default_library", "//cmd/kubeadm/app/phases/kubeconfig:go_default_library", + "//cmd/kubeadm/app/phases/selfhosting:go_default_library", "//cmd/kubeadm/app/phases/token:go_default_library", "//cmd/kubeadm/app/preflight:go_default_library", "//cmd/kubeadm/app/util:go_default_library", + "//cmd/kubeadm/app/util/config:go_default_library", "//cmd/kubeadm/app/util/kubeconfig:go_default_library", "//cmd/kubeadm/app/util/token:go_default_library", "//pkg/api:go_default_library", @@ -45,6 +45,7 @@ go_library( "//pkg/printers:go_default_library", "//pkg/util/i18n:go_default_library", "//pkg/util/initsystem:go_default_library", + "//pkg/util/node:go_default_library", "//pkg/util/version:go_default_library", "//pkg/version:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", @@ -54,7 +55,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/version:go_default_library", 
"//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", @@ -65,7 +65,6 @@ go_library( go_test( name = "go_default_test", srcs = [ - "defaults_test.go", "reset_test.go", "token_test.go", ], diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go index 4e5ac6135d0..51e45c6dc29 100644 --- a/cmd/kubeadm/app/cmd/init.go +++ b/cmd/kubeadm/app/cmd/init.go @@ -31,15 +31,17 @@ import ( kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" + cmdphases "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" - kubemaster "k8s.io/kubernetes/cmd/kubeadm/app/master" addonsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons" apiconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig" - certphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" + controlplanephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane" kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig" + selfhostingphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting" tokenphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/token" "k8s.io/kubernetes/cmd/kubeadm/app/preflight" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" + configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util/version" ) @@ -84,7 +86,13 @@ func NewCmdInit(out io.Writer) *cobra.Command { i, err := NewInit(cfgPath, internalcfg, skipPreFlight, skipTokenPrint) kubeadmutil.CheckErr(err) - kubeadmutil.CheckErr(i.Validate()) + kubeadmutil.CheckErr(i.Validate(cmd)) + + // TODO: remove this warning in 1.9 + if !cmd.Flags().Lookup("token-ttl").Changed { + fmt.Println("[kubeadm] WARNING: starting in 1.8, tokens expire after 24 hours by default (if you require a non-expiring token use 
--token-ttl 0)") + } + kubeadmutil.CheckErr(i.Run(out)) }, } @@ -121,6 +129,10 @@ func NewCmdInit(out io.Writer) *cobra.Command { &cfg.APIServerCertSANs, "apiserver-cert-extra-sans", cfg.APIServerCertSANs, `Optional extra altnames to use for the API Server serving cert. Can be both IP addresses and dns names.`, ) + cmd.PersistentFlags().StringVar( + &cfg.NodeName, "node-name", cfg.NodeName, + `Specify the node name`, + ) cmd.PersistentFlags().StringVar(&cfgPath, "config", cfgPath, "Path to kubeadm config file (WARNING: Usage of a configuration file is experimental)") @@ -132,6 +144,10 @@ func NewCmdInit(out io.Writer) *cobra.Command { &skipTokenPrint, "skip-token-print", skipTokenPrint, "Skip printing of the default bootstrap token generated by 'kubeadm init'", ) + cmd.PersistentFlags().BoolVar( + &cfg.SelfHosted, "self-hosted", cfg.SelfHosted, + "[experimental] If kubeadm should make this control plane self-hosted", + ) cmd.PersistentFlags().StringVar( &cfg.Token, "token", cfg.Token, @@ -159,11 +175,20 @@ func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight, } // Set defaults dynamically that the API group defaulting can't (by fetching information from the internet, looking up network interfaces, etc.) - err := setInitDynamicDefaults(cfg) + err := configutil.SetInitDynamicDefaults(cfg) if err != nil { return nil, err } + fmt.Printf("[init] Using Kubernetes version: %s\n", cfg.KubernetesVersion) + fmt.Printf("[init] Using Authorization mode: %v\n", cfg.AuthorizationModes) + + // Warn about the limitations with the current cloudprovider solution. 
+ if cfg.CloudProvider != "" { + fmt.Println("[init] WARNING: For cloudprovider integrations to work --cloud-provider must be set for all kubelets in the cluster.") + fmt.Println("\t(/etc/systemd/system/kubelet.service.d/10-kubeadm.conf should be edited for this purpose)") + } + if !skipPreFlight { fmt.Println("[preflight] Running pre-flight checks") @@ -186,7 +211,10 @@ type Init struct { } // Validate validates configuration passed to "kubeadm init" -func (i *Init) Validate() error { +func (i *Init) Validate(cmd *cobra.Command) error { + if err := validation.ValidateMixedArguments(cmd.Flags()); err != nil { + return err + } return validation.ValidateMasterConfiguration(i.cfg).ToAggregate() } @@ -194,7 +222,7 @@ func (i *Init) Validate() error { func (i *Init) Run(out io.Writer) error { // PHASE 1: Generate certificates - err := certphase.CreatePKIAssets(i.cfg) + err := cmdphases.CreatePKIAssets(i.cfg) if err != nil { return err } @@ -202,36 +230,26 @@ func (i *Init) Run(out io.Writer) error { // PHASE 2: Generate kubeconfig files for the admin and the kubelet masterEndpoint := fmt.Sprintf("https://%s:%d", i.cfg.API.AdvertiseAddress, i.cfg.API.BindPort) - err = kubeconfigphase.CreateInitKubeConfigFiles(masterEndpoint, i.cfg.CertificatesDir, kubeadmapi.GlobalEnvParams.KubernetesDir) + err = kubeconfigphase.CreateInitKubeConfigFiles(masterEndpoint, i.cfg.CertificatesDir, kubeadmapi.GlobalEnvParams.KubernetesDir, i.cfg.NodeName) if err != nil { return err } // PHASE 3: Bootstrap the control plane - if err := kubemaster.WriteStaticPodManifests(i.cfg); err != nil { + if err := controlplanephase.WriteStaticPodManifests(i.cfg); err != nil { return err } adminKubeConfigPath := filepath.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeadmconstants.AdminKubeConfigFileName) - client, err := kubemaster.CreateClientAndWaitForAPI(adminKubeConfigPath) + client, err := kubeadmutil.CreateClientAndWaitForAPI(adminKubeConfigPath) if err != nil { return err } - if err := 
apiconfigphase.UpdateMasterRoleLabelsAndTaints(client); err != nil { + if err := apiconfigphase.UpdateMasterRoleLabelsAndTaints(client, i.cfg.NodeName); err != nil { return err } - // Is deployment type self-hosted? - if i.cfg.SelfHosted { - // Temporary control plane is up, now we create our self hosted control - // plane components and remove the static manifests: - fmt.Println("[self-hosted] Creating self-hosted control plane...") - if err := kubemaster.CreateSelfHostedControlPlane(i.cfg, client); err != nil { - return err - } - } - // PHASE 4: Set up the bootstrap tokens if !i.skipTokenPrint { fmt.Printf("[token] Using token: %s\n", i.cfg.Token) @@ -268,6 +286,16 @@ func (i *Init) Run(out io.Writer) error { return err } + // Is deployment type self-hosted? + if i.cfg.SelfHosted { + // Temporary control plane is up, now we create our self hosted control + // plane components and remove the static manifests: + fmt.Println("[self-hosted] Creating self-hosted control plane...") + if err := selfhostingphase.CreateSelfHostedControlPlane(i.cfg, client); err != nil { + return err + } + } + ctx := map[string]string{ "KubeConfigPath": filepath.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeadmconstants.AdminKubeConfigFileName), "KubeConfigName": kubeadmconstants.AdminKubeConfigFileName, diff --git a/cmd/kubeadm/app/cmd/join.go b/cmd/kubeadm/app/cmd/join.go index 60518e6990f..9e99c2cfc00 100644 --- a/cmd/kubeadm/app/cmd/join.go +++ b/cmd/kubeadm/app/cmd/join.go @@ -20,7 +20,6 @@ import ( "fmt" "io" "io/ioutil" - "os" "path/filepath" "github.com/renstrom/dedent" @@ -33,11 +32,12 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/discovery" - kubenode "k8s.io/kubernetes/cmd/kubeadm/app/node" + kubeadmnode "k8s.io/kubernetes/cmd/kubeadm/app/node" "k8s.io/kubernetes/cmd/kubeadm/app/preflight" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" 
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" "k8s.io/kubernetes/pkg/api" + nodeutil "k8s.io/kubernetes/pkg/util/node" ) var ( @@ -63,26 +63,26 @@ func NewCmdJoin(out io.Writer) *cobra.Command { Use: "join [DiscoveryTokenAPIServers]", Short: "Run this on any machine you wish to join an existing cluster", Long: dedent.Dedent(` - When joining a kubeadm initialized cluster, we need to establish - bidirectional trust. This is split into discovery (having the Node - trust the Kubernetes Master) and TLS bootstrap (having the Kubernetes + When joining a kubeadm initialized cluster, we need to establish + bidirectional trust. This is split into discovery (having the Node + trust the Kubernetes Master) and TLS bootstrap (having the Kubernetes Master trust the Node). - There are 2 main schemes for discovery. The first is to use a shared - token along with the IP address of the API server. The second is to - provide a file (a subset of the standard kubeconfig file). This file - can be a local file or downloaded via an HTTPS URL. The forms are - kubeadm join --discovery-token abcdef.1234567890abcdef 1.2.3.4:6443, + There are 2 main schemes for discovery. The first is to use a shared + token along with the IP address of the API server. The second is to + provide a file (a subset of the standard kubeconfig file). This file + can be a local file or downloaded via an HTTPS URL. The forms are + kubeadm join --discovery-token abcdef.1234567890abcdef 1.2.3.4:6443, kubeadm join --discovery-file path/to/file.conf, or kubeadm join - --discovery-file https://url/file.conf. Only one form can be used. If - the discovery information is loaded from a URL, HTTPS must be used and + --discovery-file https://url/file.conf. Only one form can be used. If + the discovery information is loaded from a URL, HTTPS must be used and the host installed CA bundle is used to verify the connection. - The TLS bootstrap mechanism is also driven via a shared token. 
This is + The TLS bootstrap mechanism is also driven via a shared token. This is used to temporarily authenticate with the Kubernetes Master to submit a - certificate signing request (CSR) for a locally created key pair. By - default kubeadm will set up the Kubernetes Master to automatically - approve these signing requests. This token is passed in with the + certificate signing request (CSR) for a locally created key pair. By + default kubeadm will set up the Kubernetes Master to automatically + approve these signing requests. This token is passed in with the --tls-bootstrap-token abcdef.1234567890abcdef flag. Often times the same token is used for both parts. In this case, the @@ -97,7 +97,7 @@ func NewCmdJoin(out io.Writer) *cobra.Command { j, err := NewJoin(cfgPath, args, internalcfg, skipPreFlight) kubeadmutil.CheckErr(err) - kubeadmutil.CheckErr(j.Validate()) + kubeadmutil.CheckErr(j.Validate(cmd)) kubeadmutil.CheckErr(j.Run(out)) }, } @@ -112,6 +112,9 @@ func NewCmdJoin(out io.Writer) *cobra.Command { cmd.PersistentFlags().StringVar( &cfg.DiscoveryToken, "discovery-token", "", "A token used to validate cluster information fetched from the master") + cmd.PersistentFlags().StringVar( + &cfg.NodeName, "node-name", "", + "Specify the node name") cmd.PersistentFlags().StringVar( &cfg.TLSBootstrapToken, "tls-bootstrap-token", "", "A token used for TLS bootstrapping") @@ -161,7 +164,10 @@ func NewJoin(cfgPath string, args []string, cfg *kubeadmapi.NodeConfiguration, s return &Join{cfg: cfg}, nil } -func (j *Join) Validate() error { +func (j *Join) Validate(cmd *cobra.Command) error { + if err := validation.ValidateMixedArguments(cmd.PersistentFlags()); err != nil { + return err + } return validation.ValidateNodeConfiguration(j.cfg).ToAggregate() } @@ -172,18 +178,16 @@ func (j *Join) Run(out io.Writer) error { return err } - hostname, err := os.Hostname() - if err != nil { - return err - } + hostname := nodeutil.GetHostname(j.cfg.NodeName) + client, err := 
kubeconfigutil.KubeConfigToClientSet(cfg) if err != nil { return err } - if err := kubenode.ValidateAPIServer(client); err != nil { + if err := kubeadmnode.ValidateAPIServer(client); err != nil { return err } - if err := kubenode.PerformTLSBootstrap(cfg, hostname); err != nil { + if err := kubeadmnode.PerformTLSBootstrap(cfg, hostname); err != nil { return err } diff --git a/cmd/kubeadm/app/cmd/phases/BUILD b/cmd/kubeadm/app/cmd/phases/BUILD index 883f2c40f2a..c393b57b7ea 100644 --- a/cmd/kubeadm/app/cmd/phases/BUILD +++ b/cmd/kubeadm/app/cmd/phases/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( @@ -14,20 +15,38 @@ go_library( "kubeconfig.go", "phase.go", "preflight.go", + "selfhosting.go", ], tags = ["automanaged"], deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library", + "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/phases/certs:go_default_library", + "//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library", "//cmd/kubeadm/app/phases/kubeconfig:go_default_library", + "//cmd/kubeadm/app/phases/selfhosting:go_default_library", "//cmd/kubeadm/app/preflight:go_default_library", "//cmd/kubeadm/app/util:go_default_library", + "//cmd/kubeadm/app/util/config:go_default_library", + "//cmd/kubeadm/app/util/kubeconfig:go_default_library", "//pkg/api:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["certs_test.go"], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//cmd/kubeadm/app/apis/kubeadm/install:go_default_library", + "//cmd/kubeadm/app/constants:go_default_library", + 
"//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library", + "//vendor/github.com/renstrom/dedent:go_default_library", + "//vendor/github.com/spf13/cobra:go_default_library", ], ) diff --git a/cmd/kubeadm/app/cmd/phases/certs.go b/cmd/kubeadm/app/cmd/phases/certs.go index b8a90749c14..e1470131197 100644 --- a/cmd/kubeadm/app/cmd/phases/certs.go +++ b/cmd/kubeadm/app/cmd/phases/certs.go @@ -17,18 +17,20 @@ limitations under the License. package phases import ( + "crypto/rsa" + "crypto/x509" "fmt" - "net" "github.com/spf13/cobra" - netutil "k8s.io/apimachinery/pkg/util/net" - "k8s.io/apimachinery/pkg/util/validation/field" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" + kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" certphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" + "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" + configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" "k8s.io/kubernetes/pkg/api" ) @@ -40,63 +42,342 @@ func NewCmdCerts() *cobra.Command { RunE: subCmdRunE("certs"), } - cmd.AddCommand(NewCmdSelfSign()) + cmd.AddCommand(newSubCmdCerts()...) 
return cmd } -func NewCmdSelfSign() *cobra.Command { - // TODO: Move this into a dedicated Certificates Phase API object +// newSubCmdCerts returns sub commands for certs phase +func newSubCmdCerts() []*cobra.Command { + cfg := &kubeadmapiext.MasterConfiguration{} // Default values for the cobra help text api.Scheme.Default(cfg) - cmd := &cobra.Command{ - Use: "selfsign", - Short: "Generate the CA, APIServer signing/client cert, the ServiceAccount public/private keys and a CA and client cert for the front proxy", - Run: func(cmd *cobra.Command, args []string) { + var cfgPath string + var subCmds []*cobra.Command - // Run the defaulting once again to take passed flags into account - api.Scheme.Default(cfg) - internalcfg := &kubeadmapi.MasterConfiguration{} - api.Scheme.Convert(cfg, internalcfg, nil) - - err := RunSelfSign(internalcfg) - kubeadmutil.CheckErr(err) + subCmdProperties := []struct { + use string + short string + cmdFunc func(cfg *kubeadmapi.MasterConfiguration) error + }{ + { + use: "all", + short: "Generate all PKI assets necessary to establish the control plane", + cmdFunc: CreatePKIAssets, + }, + { + use: "ca", + short: "Generate CA certificate and key for a Kubernetes cluster.", + cmdFunc: createOrUseCACertAndKey, + }, + { + use: "apiserver", + short: "Generate API Server serving certificate and key.", + cmdFunc: createOrUseAPIServerCertAndKey, + }, + { + use: "apiserver-kubelet-client", + short: "Generate a client certificate for the API Server to connect to the kubelets securely.", + cmdFunc: createOrUseAPIServerKubeletClientCertAndKey, + }, + { + use: "sa", + short: "Generate a private key for signing service account tokens along with its public key.", + cmdFunc: createOrUseServiceAccountKeyAndPublicKey, + }, + { + use: "front-proxy-ca", + short: "Generate front proxy CA certificate and key for a Kubernetes cluster.", + cmdFunc: createOrUseFrontProxyCACertAndKey, + }, + { + use: "front-proxy-client", + short: "Generate front proxy CA client 
certificate and key for a Kubernetes cluster.", + cmdFunc: createOrUseFrontProxyClientCertAndKey, }, } - cmd.Flags().StringVar(&cfg.Networking.DNSDomain, "dns-domain", cfg.Networking.DNSDomain, "The DNS Domain for the Kubernetes cluster.") - cmd.Flags().StringVar(&cfg.CertificatesDir, "cert-dir", cfg.CertificatesDir, "The path where to save and store the certificates.") - cmd.Flags().StringVar(&cfg.Networking.ServiceSubnet, "service-cidr", cfg.Networking.ServiceSubnet, "The subnet for the Services in the cluster.") - cmd.Flags().StringSliceVar(&cfg.APIServerCertSANs, "cert-altnames", []string{}, "Optional extra altnames to use for the API Server serving cert. Can be both IP addresses and dns names.") - cmd.Flags().StringVar(&cfg.API.AdvertiseAddress, "apiserver-advertise-address", cfg.API.AdvertiseAddress, "The IP address the API Server will advertise it's listening on. 0.0.0.0 means the default network interface's address.") - return cmd + for _, properties := range subCmdProperties { + // Creates the UX Command + cmd := &cobra.Command{ + Use: properties.use, + Short: properties.short, + Run: runCmdFunc(properties.cmdFunc, &cfgPath, cfg), + } + + // Add flags to the command + cmd.Flags().StringVar(&cfgPath, "config", cfgPath, "Path to kubeadm config file (WARNING: Usage of a configuration file is experimental)") + cmd.Flags().StringVar(&cfg.CertificatesDir, "cert-dir", cfg.CertificatesDir, "The path where to save and store the certificates") + if properties.use == "all" || properties.use == "apiserver" { + cmd.Flags().StringVar(&cfg.Networking.DNSDomain, "service-dns-domain", cfg.Networking.DNSDomain, "Use alternative domain for services, e.g. 
\"myorg.internal\"") + cmd.Flags().StringVar(&cfg.Networking.ServiceSubnet, "service-cidr", cfg.Networking.ServiceSubnet, "Use alternative range of IP address for service VIPs") + cmd.Flags().StringSliceVar(&cfg.APIServerCertSANs, "apiserver-cert-extra-sans", []string{}, "Optional extra altnames to use for the API Server serving cert. Can be both IP addresses and dns names.") + cmd.Flags().StringVar(&cfg.API.AdvertiseAddress, "apiserver-advertise-address", cfg.API.AdvertiseAddress, "The IP address the API Server will advertise it's listening on. 0.0.0.0 means the default network interface's address.") + } + + subCmds = append(subCmds, cmd) + } + + return subCmds } -// RunSelfSign generates certificate assets in the specified directory -func RunSelfSign(config *kubeadmapi.MasterConfiguration) error { - if err := validateArgs(config); err != nil { - return fmt.Errorf("The argument validation failed: %v", err) +// runCmdFunc creates a cobra.Command Run function, by composing the call to the given cmdFunc with necessary additional steps (e.g preparation of inpunt parameters) +func runCmdFunc(cmdFunc func(cfg *kubeadmapi.MasterConfiguration) error, cfgPath *string, cfg *kubeadmapiext.MasterConfiguration) func(cmd *cobra.Command, args []string) { + + // the following statement build a clousure that wraps a call to a CreateCertFunc, binding + // the function itself with the specific parameters of each sub command. + // Please note that specific parameter should be passed as value, while other parameters - passed as reference - + // are shared between sub commnands and gets access to current value e.g. flags value. 
+ + return func(cmd *cobra.Command, args []string) { + internalcfg := &kubeadmapi.MasterConfiguration{} + + // Takes passed flags into account; the defaulting is executed once again enforcing assignement of + // static default values to cfg only for values not provided with flags + api.Scheme.Default(cfg) + api.Scheme.Convert(cfg, internalcfg, nil) + + // Loads configuration from config file, if provided + // Nb. --config overrides command line flags + err := configutil.TryLoadMasterConfiguration(*cfgPath, internalcfg) + kubeadmutil.CheckErr(err) + + // Applies dynamic defaults to settings not provided with flags + err = configutil.SetInitDynamicDefaults(internalcfg) + kubeadmutil.CheckErr(err) + + // Validates cfg (flags/configs + defaults + dynamic defaults) + err = validation.ValidateMasterConfiguration(internalcfg).ToAggregate() + kubeadmutil.CheckErr(err) + + // Execute the cmdFunc + err = cmdFunc(internalcfg) + kubeadmutil.CheckErr(err) + } +} + +// CreatePKIAssets will create and write to disk all PKI assets necessary to establish the control plane. +// Please note that this action is a bulk action calling all the atomic certphase actions +func CreatePKIAssets(cfg *kubeadmapi.MasterConfiguration) error { + + certActions := []func(cfg *kubeadmapi.MasterConfiguration) error{ + createOrUseCACertAndKey, + createOrUseAPIServerCertAndKey, + createOrUseAPIServerKubeletClientCertAndKey, + createOrUseServiceAccountKeyAndPublicKey, + createOrUseFrontProxyCACertAndKey, + createOrUseFrontProxyClientCertAndKey, } - // If it's possible to detect the default IP, add it to the SANs as well. 
Otherwise, just go with the provided ones - ip, err := netutil.ChooseBindAddress(net.ParseIP(config.API.AdvertiseAddress)) - if err == nil { - config.API.AdvertiseAddress = ip.String() + for _, action := range certActions { + err := action(cfg) + if err != nil { + return err + } } - if err = certphase.CreatePKIAssets(config); err != nil { - return err + fmt.Printf("[certificates] Valid certificates and keys now exist in %q\n", cfg.CertificatesDir) + + return nil +} + +// createOrUseCACertAndKey create a new self signed CA, or use the existing one. +func createOrUseCACertAndKey(cfg *kubeadmapi.MasterConfiguration) error { + + return createOrUseCertificateAuthorithy( + cfg.CertificatesDir, + kubeadmconstants.CACertAndKeyBaseName, + "CA", + certphase.NewCACertAndKey, + ) +} + +// createOrUseAPIServerCertAndKey create a new CA certificate for apiserver, or use the existing one. +// It assumes the CA certificates should exists into the CertificatesDir +func createOrUseAPIServerCertAndKey(cfg *kubeadmapi.MasterConfiguration) error { + + return createOrUseSignedCertificate( + cfg.CertificatesDir, + kubeadmconstants.CACertAndKeyBaseName, + kubeadmconstants.APIServerCertAndKeyBaseName, + "API server", + func(caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) { + return certphase.NewAPIServerCertAndKey(cfg, caCert, caKey) + }, + ) +} + +// create a new CA certificate for kubelets calling apiserver, or use the existing one +// It assumes the CA certificates should exists into the CertificatesDir +func createOrUseAPIServerKubeletClientCertAndKey(cfg *kubeadmapi.MasterConfiguration) error { + + return createOrUseSignedCertificate( + cfg.CertificatesDir, + kubeadmconstants.CACertAndKeyBaseName, + kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName, + "API server kubelet client", + certphase.NewAPIServerKubeletClientCertAndKey, + ) +} + +// createOrUseServiceAccountKeyAndPublicKey create a new public/private key pairs for signing 
service account user, or use the existing one. +func createOrUseServiceAccountKeyAndPublicKey(cfg *kubeadmapi.MasterConfiguration) error { + + return createOrUseKeyAndPublicKey( + cfg.CertificatesDir, + kubeadmconstants.ServiceAccountKeyBaseName, + "service account", + certphase.NewServiceAccountSigningKey, + ) +} + +// createOrUseFrontProxyCACertAndKey create a new self signed front proxy CA, or use the existing one. +func createOrUseFrontProxyCACertAndKey(cfg *kubeadmapi.MasterConfiguration) error { + + return createOrUseCertificateAuthorithy( + cfg.CertificatesDir, + kubeadmconstants.FrontProxyCACertAndKeyBaseName, + "front-proxy CA", + certphase.NewFrontProxyCACertAndKey, + ) +} + +// createOrUseFrontProxyClientCertAndKey create a new certificate for proxy server client, or use the existing one. +// It assumes the front proxy CA certificates should exists into the CertificatesDir +func createOrUseFrontProxyClientCertAndKey(cfg *kubeadmapi.MasterConfiguration) error { + + return createOrUseSignedCertificate( + cfg.CertificatesDir, + kubeadmconstants.FrontProxyCACertAndKeyBaseName, + kubeadmconstants.FrontProxyClientCertAndKeyBaseName, + "front-proxy client", + certphase.NewFrontProxyClientCertAndKey, + ) +} + +// createOrUseCertificateAuthorithy is a generic function that will create a new certificate Authorithy using the given newFunc, +// assign file names according to the given baseName, or use the existing one already present in pkiDir. 
+func createOrUseCertificateAuthorithy(pkiDir string, baseName string, UXName string, newFunc func() (*x509.Certificate, *rsa.PrivateKey, error)) error { + + // If cert or key exists, we should try to load them + if pkiutil.CertOrKeyExist(pkiDir, baseName) { + + // Try to load .crt and .key from the PKI directory + caCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, baseName) + if err != nil { + return fmt.Errorf("failure loading %s certificate: %v", UXName, err) + } + + // Check if the existing cert is a CA + if !caCert.IsCA { + return fmt.Errorf("certificate %s is not a CA", UXName) + } + + fmt.Printf("[certificates] Using the existing %s certificate and key.\n", UXName) + } else { + // The certificate and the key did NOT exist, let's generate them now + caCert, caKey, err := newFunc() + if err != nil { + return fmt.Errorf("failure while generating %s certificate and key: %v", UXName, err) + } + + // Write .crt and .key files to disk + if err = pkiutil.WriteCertAndKey(pkiDir, baseName, caCert, caKey); err != nil { + return fmt.Errorf("failure while saving %s certificate and key: %v", UXName, err) + } + + fmt.Printf("[certificates] Generated %s certificate and key.\n", UXName) } return nil } -func validateArgs(config *kubeadmapi.MasterConfiguration) error { - allErrs := field.ErrorList{} - allErrs = append(allErrs, validation.ValidateNetworking(&config.Networking, field.NewPath("networking"))...) - allErrs = append(allErrs, validation.ValidateAbsolutePath(config.CertificatesDir, field.NewPath("cert-dir"))...) - allErrs = append(allErrs, validation.ValidateAPIServerCertSANs(config.APIServerCertSANs, field.NewPath("cert-altnames"))...) - allErrs = append(allErrs, validation.ValidateIPFromString(config.API.AdvertiseAddress, field.NewPath("apiserver-advertise-address"))...) 
+// createOrUseSignedCertificate is a generic function that will create a new signed certificate using the given newFunc, +// assign file names according to the given baseName, or use the existing one already present in pkiDir. +func createOrUseSignedCertificate(pkiDir string, CABaseName string, baseName string, UXName string, newFunc func(*x509.Certificate, *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error)) error { - return allErrs.ToAggregate() + // Checks if certificate authorithy exists in the PKI directory + if !pkiutil.CertOrKeyExist(pkiDir, CABaseName) { + return fmt.Errorf("couldn't load certificate authorithy for %s from certificate dir", UXName) + } + + // Try to load certificate authorithy .crt and .key from the PKI directory + caCert, caKey, err := pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, CABaseName) + if err != nil { + return fmt.Errorf("failure loading certificate authorithy for %s: %v", UXName, err) + } + + // Make sure the loaded CA cert actually is a CA + if !caCert.IsCA { + return fmt.Errorf("certificate authorithy for %s is not a CA", UXName) + } + + // Checks if the signed certificate exists in the PKI directory + if pkiutil.CertOrKeyExist(pkiDir, baseName) { + // Try to load signed certificate .crt and .key from the PKI directory + signedCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, baseName) + if err != nil { + return fmt.Errorf("failure loading %s certificate: %v", UXName, err) + } + + // Check if the existing cert is signed by the given CA + if err := signedCert.CheckSignatureFrom(caCert); err != nil { + return fmt.Errorf("certificate %s is not signed by corresponding CA", UXName) + } + + fmt.Printf("[certificates] Using the existing %s certificate and key.\n", UXName) + } else { + // The certificate and the key did NOT exist, let's generate them now + signedCert, signedKey, err := newFunc(caCert, caKey) + if err != nil { + return fmt.Errorf("failure while generating %s key and certificate: %v", UXName, err) + } + + 
// Write .crt and .key files to disk + if err = pkiutil.WriteCertAndKey(pkiDir, baseName, signedCert, signedKey); err != nil { + return fmt.Errorf("failure while saving %s certificate and key: %v", UXName, err) + } + + fmt.Printf("[certificates] Generated %s certificate and key.\n", UXName) + if pkiutil.HasServerAuth(signedCert) { + fmt.Printf("[certificates] %s serving cert is signed for DNS names %v and IPs %v\n", UXName, signedCert.DNSNames, signedCert.IPAddresses) + } + } + + return nil +} + +// createOrUseKeyAndPublicKey is a generic function that will create a new public/private key pairs using the given newFunc, +// assign file names according to the given baseName, or use the existing one already present in pkiDir. +func createOrUseKeyAndPublicKey(pkiDir string, baseName string, UXName string, newFunc func() (*rsa.PrivateKey, error)) error { + + // Checks if the key exists in the PKI directory + if pkiutil.CertOrKeyExist(pkiDir, baseName) { + + // Try to load .key from the PKI directory + _, err := pkiutil.TryLoadKeyFromDisk(pkiDir, baseName) + if err != nil { + return fmt.Errorf("%s key existed but they could not be loaded properly: %v", UXName, err) + } + + fmt.Printf("[certificates] Using the existing %s key.\n", UXName) + } else { + // The key does NOT exist, let's generate it now + key, err := newFunc() + if err != nil { + return fmt.Errorf("failure while generating %s key: %v", UXName, err) + } + + // Write .key and .pub files to disk + if err = pkiutil.WriteKey(pkiDir, baseName, key); err != nil { + return fmt.Errorf("failure while saving %s key: %v", UXName, err) + } + + if err = pkiutil.WritePublicKey(pkiDir, baseName, &key.PublicKey); err != nil { + return fmt.Errorf("failure while saving %s public key: %v", UXName, err) + } + fmt.Printf("[certificates] Generated %s key and public key.\n", UXName) + } + + return nil } diff --git a/cmd/kubeadm/app/cmd/phases/certs_test.go b/cmd/kubeadm/app/cmd/phases/certs_test.go new file mode 100644 index 
00000000000..c6fad4b27bc --- /dev/null +++ b/cmd/kubeadm/app/cmd/phases/certs_test.go @@ -0,0 +1,270 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package phases + +import ( + "fmt" + "html/template" + "io/ioutil" + "os" + "path" + "testing" + + "github.com/renstrom/dedent" + "github.com/spf13/cobra" + + // required for triggering api machinery startup when running unit tests + _ "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install" + + kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" + "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil" +) + +func TestSubCmdCertsCreateFiles(t *testing.T) { + + subCmds := newSubCmdCerts() + + var tests = []struct { + subCmds []string + expectedFiles []string + }{ + { + subCmds: []string{"all"}, + expectedFiles: []string{ + kubeadmconstants.CACertName, kubeadmconstants.CAKeyName, + kubeadmconstants.APIServerCertName, kubeadmconstants.APIServerKeyName, + kubeadmconstants.APIServerKubeletClientCertName, kubeadmconstants.APIServerKubeletClientKeyName, + kubeadmconstants.ServiceAccountPrivateKeyName, kubeadmconstants.ServiceAccountPublicKeyName, + kubeadmconstants.FrontProxyCACertName, kubeadmconstants.FrontProxyCAKeyName, + kubeadmconstants.FrontProxyClientCertName, kubeadmconstants.FrontProxyClientKeyName, + }, + }, + { + subCmds: []string{"ca"}, + expectedFiles: []string{kubeadmconstants.CACertName, kubeadmconstants.CAKeyName}, + }, + { + subCmds: []string{"ca", "apiserver"}, + 
expectedFiles: []string{kubeadmconstants.CACertName, kubeadmconstants.CAKeyName, kubeadmconstants.APIServerCertName, kubeadmconstants.APIServerKeyName}, + }, + { + subCmds: []string{"ca", "apiserver-kubelet-client"}, + expectedFiles: []string{kubeadmconstants.CACertName, kubeadmconstants.CAKeyName, kubeadmconstants.APIServerKubeletClientCertName, kubeadmconstants.APIServerKubeletClientKeyName}, + }, + { + subCmds: []string{"sa"}, + expectedFiles: []string{kubeadmconstants.ServiceAccountPrivateKeyName, kubeadmconstants.ServiceAccountPublicKeyName}, + }, + { + subCmds: []string{"front-proxy-ca"}, + expectedFiles: []string{kubeadmconstants.FrontProxyCACertName, kubeadmconstants.FrontProxyCAKeyName}, + }, + { + subCmds: []string{"front-proxy-ca", "front-proxy-client"}, + expectedFiles: []string{kubeadmconstants.FrontProxyCACertName, kubeadmconstants.FrontProxyCAKeyName, kubeadmconstants.FrontProxyClientCertName, kubeadmconstants.FrontProxyClientKeyName}, + }, + } + + for _, test := range tests { + // Temporary folder for the test case + tmpdir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("Couldn't create tmpdir") + } + defer os.RemoveAll(tmpdir) + + // executes given sub commands + for _, subCmdName := range test.subCmds { + subCmd := getSubCmd(t, subCmdName, subCmds) + subCmd.SetArgs([]string{fmt.Sprintf("--cert-dir=%s", tmpdir)}) + if err := subCmd.Execute(); err != nil { + t.Fatalf("Could not execute subcommand: %s", subCmdName) + } + } + + // verify expected files are there + assertFilesCount(t, tmpdir, len(test.expectedFiles)) + for _, file := range test.expectedFiles { + assertFileExists(t, tmpdir, file) + } + } +} + +func TestSubCmdApiServerFlags(t *testing.T) { + + subCmds := newSubCmdCerts() + + // Temporary folder for the test case + tmpdir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("Couldn't create tmpdir") + } + defer os.RemoveAll(tmpdir) + + // creates ca cert + subCmd := getSubCmd(t, "ca", subCmds) + 
subCmd.SetArgs([]string{fmt.Sprintf("--cert-dir=%s", tmpdir)}) + if err := subCmd.Execute(); err != nil { + t.Fatalf("Could not execute subcommand ca") + } + + // creates apiserver cert + subCmd = getSubCmd(t, "apiserver", subCmds) + subCmd.SetArgs([]string{ + fmt.Sprintf("--cert-dir=%s", tmpdir), + "--apiserver-cert-extra-sans=foo,boo", + "--service-cidr=10.0.0.0/24", + "--service-dns-domain=mycluster.local", + "--apiserver-advertise-address=1.2.3.4", + }) + if err := subCmd.Execute(); err != nil { + t.Fatalf("Could not execute subcommand apiserver") + } + + APIserverCert, err := pkiutil.TryLoadCertFromDisk(tmpdir, kubeadmconstants.APIServerCertAndKeyBaseName) + if err != nil { + t.Fatalf("Error loading API server certificate: %v", err) + } + + hostname, err := os.Hostname() + if err != nil { + t.Errorf("couldn't get the hostname: %v", err) + } + for i, name := range []string{hostname, "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.mycluster.local"} { + if APIserverCert.DNSNames[i] != name { + t.Errorf("APIserverCert.DNSNames[%d] is %s instead of %s", i, APIserverCert.DNSNames[i], name) + } + } + for i, ip := range []string{"10.0.0.1", "1.2.3.4"} { + if APIserverCert.IPAddresses[i].String() != ip { + t.Errorf("APIserverCert.IPAddresses[%d] is %s instead of %s", i, APIserverCert.IPAddresses[i], ip) + } + } +} + +func TestSubCmdReadsConfig(t *testing.T) { + + subCmds := newSubCmdCerts() + + var tests = []struct { + subCmds []string + expectedFileCount int + }{ + { + subCmds: []string{"sa"}, + expectedFileCount: 2, + }, + { + subCmds: []string{"front-proxy-ca", "front-proxy-client"}, + expectedFileCount: 4, + }, + { + subCmds: []string{"ca", "apiserver", "apiserver-kubelet-client"}, + expectedFileCount: 6, + }, + { + subCmds: []string{"all"}, + expectedFileCount: 12, + }, + } + + for _, test := range tests { + // Temporary folder for the test case + tmpdir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("Couldn't 
create tmpdir") + } + defer os.RemoveAll(tmpdir) + + configPath := saveDummyCfg(t, tmpdir) + + // executes given sub commands + for _, subCmdName := range test.subCmds { + subCmd := getSubCmd(t, subCmdName, subCmds) + subCmd.SetArgs([]string{fmt.Sprintf("--config=%s", configPath)}) + if err := subCmd.Execute(); err != nil { + t.Fatalf("Could not execute command: %s", subCmdName) + } + } + + // verify expected files are there + // NB. test.expectedFileCount + 1 because in this test case the tempdir where key/certificates + // are saved contains also the dummy configuration file + assertFilesCount(t, tmpdir, test.expectedFileCount+1) + } +} + +func getSubCmd(t *testing.T, name string, subCmds []*cobra.Command) *cobra.Command { + for _, subCmd := range subCmds { + if subCmd.Name() == name { + return subCmd + } + } + t.Fatalf("Unable to find sub command %s", name) + + return nil +} + +func assertFilesCount(t *testing.T, dirName string, count int) { + files, err := ioutil.ReadDir(dirName) + if err != nil { + t.Fatalf("Couldn't read files from tmpdir: %s", err) + } + + if len(files) != count { + t.Errorf("dir does contains %d, %d expected", len(files), count) + for _, f := range files { + t.Error(f.Name()) + } + } +} + +func assertFileExists(t *testing.T, dirName string, fileName string) { + path := path.Join(dirName, fileName) + if _, err := os.Stat(path); os.IsNotExist(err) { + t.Errorf("file %s does not exist", fileName) + } +} + +func saveDummyCfg(t *testing.T, dirName string) string { + + path := path.Join(dirName, "dummyconfig.yaml") + cfgTemplate := template.Must(template.New("init").Parse(dedent.Dedent(` + apiVersion: kubeadm.k8s.io/v1alpha1 + kind: MasterConfiguration + certificatesDir: {{.CertificatesDir}} + `))) + + f, err := os.Create(path) + if err != nil { + t.Errorf("error creating dummyconfig file %s: %v", path, err) + } + + templateData := struct { + CertificatesDir string + }{ + CertificatesDir: dirName, + } + + err = cfgTemplate.Execute(f, 
templateData) + if err != nil { + t.Errorf("error generating dummyconfig file %s: %v", path, err) + } + f.Close() + + return path +} diff --git a/cmd/kubeadm/app/cmd/phases/phase.go b/cmd/kubeadm/app/cmd/phases/phase.go index 0ef3491ff66..62a0b3754c6 100644 --- a/cmd/kubeadm/app/cmd/phases/phase.go +++ b/cmd/kubeadm/app/cmd/phases/phase.go @@ -33,6 +33,7 @@ func NewCmdPhase(out io.Writer) *cobra.Command { cmd.AddCommand(NewCmdKubeConfig(out)) cmd.AddCommand(NewCmdCerts()) cmd.AddCommand(NewCmdPreFlight()) + cmd.AddCommand(NewCmdSelfhosting()) return cmd } diff --git a/cmd/kubeadm/app/cmd/phases/selfhosting.go b/cmd/kubeadm/app/cmd/phases/selfhosting.go new file mode 100644 index 00000000000..2cd4ca14759 --- /dev/null +++ b/cmd/kubeadm/app/cmd/phases/selfhosting.go @@ -0,0 +1,52 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package phases + +import ( + "github.com/spf13/cobra" + + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1" + "k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting" + kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" + kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" + "k8s.io/kubernetes/pkg/api" +) + +// NewCmdSelfhosting returns the self-hosting Cobra command +func NewCmdSelfhosting() *cobra.Command { + var kubeConfigFile string + cfg := &kubeadmapiext.MasterConfiguration{} + cmd := &cobra.Command{ + Use: "selfhosting", + Aliases: []string{"selfhosted"}, + Short: "Make a kubeadm cluster self-hosted.", + Run: func(cmd *cobra.Command, args []string) { + api.Scheme.Default(cfg) + internalcfg := &kubeadmapi.MasterConfiguration{} + api.Scheme.Convert(cfg, internalcfg, nil) + client, err := kubeconfigutil.ClientSetFromFile(kubeConfigFile) + kubeadmutil.CheckErr(err) + + err = selfhosting.CreateSelfHostedControlPlane(internalcfg, client) + kubeadmutil.CheckErr(err) + }, + } + + cmd.Flags().StringVar(&kubeConfigFile, "kubeconfig", "/etc/kubernetes/admin.conf", "The KubeConfig file to use for talking to the cluster") + return cmd +} diff --git a/cmd/kubeadm/app/cmd/token.go b/cmd/kubeadm/app/cmd/token.go index e496709b884..7e542f54644 100644 --- a/cmd/kubeadm/app/cmd/token.go +++ b/cmd/kubeadm/app/cmd/token.go @@ -109,6 +109,12 @@ func NewCmdToken(out io.Writer, errW io.Writer) *cobra.Command { client, err := kubeconfigutil.ClientSetFromFile(kubeConfigFile) kubeadmutil.CheckErr(err) + // TODO: remove this warning in 1.9 + if !tokenCmd.Flags().Lookup("ttl").Changed { + // sending this output to stderr s + fmt.Fprintln(errW, "[kubeadm] WARNING: starting in 1.8, tokens expire after 24 hours by default (if you require a non-expiring token use --ttl 0)") + } + err = RunCreateToken(out, client, token, tokenDuration, usages, description) kubeadmutil.CheckErr(err) }, 
diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index 4b91728140b..98c43e4a150 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -37,10 +37,12 @@ const ( APIServerCertAndKeyBaseName = "apiserver" APIServerCertName = "apiserver.crt" APIServerKeyName = "apiserver.key" + APIServerCertCommonName = "kube-apiserver" //used as subject.commonname attribute (CN) APIServerKubeletClientCertAndKeyBaseName = "apiserver-kubelet-client" APIServerKubeletClientCertName = "apiserver-kubelet-client.crt" APIServerKubeletClientKeyName = "apiserver-kubelet-client.key" + APIServerKubeletClientCertCommonName = "kube-apiserver-kubelet-client" //used as subject.commonname attribute (CN) ServiceAccountKeyBaseName = "sa" ServiceAccountPublicKeyName = "sa.pub" @@ -53,6 +55,7 @@ const ( FrontProxyClientCertAndKeyBaseName = "front-proxy-client" FrontProxyClientCertName = "front-proxy-client.crt" FrontProxyClientKeyName = "front-proxy-client.key" + FrontProxyClientCertCommonName = "front-proxy-client" //used as subject.commonname attribute (CN) AdminKubeConfigFileName = "admin.conf" KubeletKubeConfigFileName = "kubelet.conf" @@ -81,8 +84,8 @@ const ( MinimumAddressesInServiceSubnet = 10 // DefaultTokenDuration specifies the default amount of time that a bootstrap token will be valid - // Default behaviour is "never expire" == 0 - DefaultTokenDuration = 0 + // Default behaviour is 24 hours + DefaultTokenDuration = 24 * time.Hour // LabelNodeRoleMaster specifies that a node is a master // It's copied over to kubeadm until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112 @@ -107,11 +110,10 @@ var ( DefaultTokenUsages = []string{"signing", "authentication"} // MinimumControlPlaneVersion specifies the minimum control plane version kubeadm can deploy - MinimumControlPlaneVersion = version.MustParseSemantic("v1.6.0") - - // MinimumCSRSARApproverVersion specifies the minimum kubernetes 
version that can be used for enabling the new-in-v1.7 CSR approver based on a SubjectAccessReview - MinimumCSRSARApproverVersion = version.MustParseSemantic("v1.7.0-beta.0") - - // MinimumAPIAggregationVersion specifies the minimum kubernetes version that can be used enabling the API aggregation in the apiserver and the front proxy flags - MinimumAPIAggregationVersion = version.MustParseSemantic("v1.7.0-alpha.1") + MinimumControlPlaneVersion = version.MustParseSemantic("v1.7.0") ) + +// BuildStaticManifestFilepath returns the location on the disk where the Static Pod should be present +func BuildStaticManifestFilepath(componentName string) string { + return filepath.Join(KubernetesDir, ManifestsSubDirName, componentName+".yaml") +} diff --git a/cmd/kubeadm/app/discovery/token/token.go b/cmd/kubeadm/app/discovery/token/token.go index 299f948f986..f765a4153dc 100644 --- a/cmd/kubeadm/app/discovery/token/token.go +++ b/cmd/kubeadm/app/discovery/token/token.go @@ -58,7 +58,7 @@ func RetrieveValidatedClusterInfo(discoveryToken string, tokenAPIServers []strin fmt.Printf("[discovery] Created cluster-info discovery client, requesting info from %q\n", bootstrapConfig.Clusters[clusterName].Server) var clusterinfo *v1.ConfigMap - wait.PollInfinite(constants.DiscoveryRetryInterval, func() (bool, error) { + wait.PollImmediateInfinite(constants.DiscoveryRetryInterval, func() (bool, error) { var err error clusterinfo, err = client.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) if err != nil { diff --git a/cmd/kubeadm/app/images/BUILD b/cmd/kubeadm/app/images/BUILD index 23002061ac5..6209daaf175 100644 --- a/cmd/kubeadm/app/images/BUILD +++ b/cmd/kubeadm/app/images/BUILD @@ -12,7 +12,10 @@ go_library( name = "go_default_library", srcs = ["images.go"], tags = ["automanaged"], - deps = ["//cmd/kubeadm/app/apis/kubeadm:go_default_library"], + deps = [ + "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + 
"//cmd/kubeadm/app/util:go_default_library", + ], ) go_test( diff --git a/cmd/kubeadm/app/images/images.go b/cmd/kubeadm/app/images/images.go index 3f714c9390b..017201667bd 100644 --- a/cmd/kubeadm/app/images/images.go +++ b/cmd/kubeadm/app/images/images.go @@ -21,6 +21,7 @@ import ( "runtime" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" ) const ( @@ -37,12 +38,13 @@ func GetCoreImage(image string, cfg *kubeadmapi.MasterConfiguration, overrideIma if overrideImage != "" { return overrideImage } - repoPrefix := kubeadmapi.GlobalEnvParams.RepositoryPrefix + repoPrefix := cfg.ImageRepository + kubernetesImageTag := kubeadmutil.KubernetesVersionToImageTag(cfg.KubernetesVersion) return map[string]string{ KubeEtcdImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, "etcd", runtime.GOARCH, etcdVersion), - KubeAPIServerImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, "kube-apiserver", runtime.GOARCH, cfg.KubernetesVersion), - KubeControllerManagerImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, "kube-controller-manager", runtime.GOARCH, cfg.KubernetesVersion), - KubeSchedulerImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, "kube-scheduler", runtime.GOARCH, cfg.KubernetesVersion), - KubeProxyImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, "kube-proxy", runtime.GOARCH, cfg.KubernetesVersion), + KubeAPIServerImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, "kube-apiserver", runtime.GOARCH, kubernetesImageTag), + KubeControllerManagerImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, "kube-controller-manager", runtime.GOARCH, kubernetesImageTag), + KubeSchedulerImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, "kube-scheduler", runtime.GOARCH, kubernetesImageTag), + KubeProxyImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, "kube-proxy", runtime.GOARCH, kubernetesImageTag), }[image] } diff --git a/cmd/kubeadm/app/images/images_test.go b/cmd/kubeadm/app/images/images_test.go index f01b8903c8d..cae27b18459 100644 --- 
a/cmd/kubeadm/app/images/images_test.go +++ b/cmd/kubeadm/app/images/images_test.go @@ -31,7 +31,8 @@ type getCoreImageTest struct { } const ( - testversion = "1" + testversion = "v10.1.2-alpha.1.100+0123456789abcdef+SOMETHING" + expected = "v10.1.2-alpha.1.100_0123456789abcdef_SOMETHING" gcrPrefix = "gcr.io/google_containers" ) @@ -43,28 +44,28 @@ func TestGetCoreImage(t *testing.T) { {getCoreImageTest{o: "override"}, "override"}, {getCoreImageTest{ i: KubeEtcdImage, - c: &kubeadmapi.MasterConfiguration{}}, + c: &kubeadmapi.MasterConfiguration{ImageRepository: gcrPrefix}}, fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, "etcd", runtime.GOARCH, etcdVersion), }, {getCoreImageTest{ i: KubeAPIServerImage, - c: &kubeadmapi.MasterConfiguration{KubernetesVersion: testversion}}, - fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, "kube-apiserver", runtime.GOARCH, testversion), + c: &kubeadmapi.MasterConfiguration{ImageRepository: gcrPrefix, KubernetesVersion: testversion}}, + fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, "kube-apiserver", runtime.GOARCH, expected), }, {getCoreImageTest{ i: KubeControllerManagerImage, - c: &kubeadmapi.MasterConfiguration{KubernetesVersion: testversion}}, - fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, "kube-controller-manager", runtime.GOARCH, testversion), + c: &kubeadmapi.MasterConfiguration{ImageRepository: gcrPrefix, KubernetesVersion: testversion}}, + fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, "kube-controller-manager", runtime.GOARCH, expected), }, {getCoreImageTest{ i: KubeSchedulerImage, - c: &kubeadmapi.MasterConfiguration{KubernetesVersion: testversion}}, - fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, "kube-scheduler", runtime.GOARCH, testversion), + c: &kubeadmapi.MasterConfiguration{ImageRepository: gcrPrefix, KubernetesVersion: testversion}}, + fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, "kube-scheduler", runtime.GOARCH, expected), }, {getCoreImageTest{ i: KubeProxyImage, - c: &kubeadmapi.MasterConfiguration{KubernetesVersion: testversion}}, - fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, 
"kube-proxy", runtime.GOARCH, testversion), + c: &kubeadmapi.MasterConfiguration{ImageRepository: gcrPrefix, KubernetesVersion: testversion}}, + fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, "kube-proxy", runtime.GOARCH, expected), }, } for _, it := range imageTest { diff --git a/cmd/kubeadm/app/master/selfhosted.go b/cmd/kubeadm/app/master/selfhosted.go deleted file mode 100644 index 810b6a9468c..00000000000 --- a/cmd/kubeadm/app/master/selfhosted.go +++ /dev/null @@ -1,347 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package master - -import ( - "fmt" - "os" - "path/filepath" - "time" - - "k8s.io/api/core/v1" - ext "k8s.io/api/extensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/wait" - clientset "k8s.io/client-go/kubernetes" - kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" - "k8s.io/kubernetes/cmd/kubeadm/app/images" - "k8s.io/kubernetes/pkg/util/version" -) - -var ( - // maximum unavailable and surge instances per self-hosted component deployment - maxUnavailable = intstr.FromInt(0) - maxSurge = intstr.FromInt(1) -) - -func CreateSelfHostedControlPlane(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset) error { - volumes := []v1.Volume{k8sVolume()} - volumeMounts := []v1.VolumeMount{k8sVolumeMount()} - if isCertsVolumeMountNeeded() { - volumes = append(volumes, certsVolume(cfg)) - volumeMounts = append(volumeMounts, certsVolumeMount()) - } - - if isPkiVolumeMountNeeded() { - volumes = append(volumes, pkiVolume()) - volumeMounts = append(volumeMounts, pkiVolumeMount()) - } - - // Need lock for self-hosted - volumes = append(volumes, flockVolume()) - volumeMounts = append(volumeMounts, flockVolumeMount()) - - if err := launchSelfHostedAPIServer(cfg, client, volumes, volumeMounts); err != nil { - return err - } - - if err := launchSelfHostedScheduler(cfg, client, volumes, volumeMounts); err != nil { - return err - } - - if err := launchSelfHostedControllerManager(cfg, client, volumes, volumeMounts); err != nil { - return err - } - - return nil -} - -func launchSelfHostedAPIServer(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error { - start := time.Now() - - kubeVersion, err := version.ParseSemantic(cfg.KubernetesVersion) - if err != nil { - return err - } - apiServer := getAPIServerDS(cfg, volumes, volumeMounts, kubeVersion) 
- if _, err := client.Extensions().DaemonSets(metav1.NamespaceSystem).Create(&apiServer); err != nil { - return fmt.Errorf("failed to create self-hosted %q daemon set [%v]", kubeAPIServer, err) - } - - wait.PollInfinite(kubeadmconstants.APICallRetryInterval, func() (bool, error) { - // TODO: This might be pointless, checking the pods is probably enough. - // It does however get us a count of how many there should be which may be useful - // with HA. - apiDS, err := client.DaemonSets(metav1.NamespaceSystem).Get("self-hosted-"+kubeAPIServer, - metav1.GetOptions{}) - if err != nil { - fmt.Println("[self-hosted] error getting apiserver DaemonSet:", err) - return false, nil - } - fmt.Printf("[self-hosted] %s DaemonSet current=%d, desired=%d\n", - kubeAPIServer, - apiDS.Status.CurrentNumberScheduled, - apiDS.Status.DesiredNumberScheduled) - - if apiDS.Status.CurrentNumberScheduled != apiDS.Status.DesiredNumberScheduled { - return false, nil - } - - return true, nil - }) - - // Wait for self-hosted API server to take ownership - waitForPodsWithLabel(client, "self-hosted-"+kubeAPIServer, true) - - // Remove temporary API server - apiServerStaticManifestPath := buildStaticManifestFilepath(kubeAPIServer) - if err := os.RemoveAll(apiServerStaticManifestPath); err != nil { - return fmt.Errorf("unable to delete temporary API server manifest [%v]", err) - } - - WaitForAPI(client) - - fmt.Printf("[self-hosted] self-hosted kube-apiserver ready after %f seconds\n", time.Since(start).Seconds()) - return nil -} - -func launchSelfHostedControllerManager(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error { - start := time.Now() - - kubeVersion, err := version.ParseSemantic(cfg.KubernetesVersion) - if err != nil { - return err - } - - ctrlMgr := getControllerManagerDeployment(cfg, volumes, volumeMounts, kubeVersion) - if _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(&ctrlMgr); err != nil 
{ - return fmt.Errorf("failed to create self-hosted %q deployment [%v]", kubeControllerManager, err) - } - - waitForPodsWithLabel(client, "self-hosted-"+kubeControllerManager, true) - - ctrlMgrStaticManifestPath := buildStaticManifestFilepath(kubeControllerManager) - if err := os.RemoveAll(ctrlMgrStaticManifestPath); err != nil { - return fmt.Errorf("unable to delete temporary controller manager manifest [%v]", err) - } - - fmt.Printf("[self-hosted] self-hosted kube-controller-manager ready after %f seconds\n", time.Since(start).Seconds()) - return nil - -} - -func launchSelfHostedScheduler(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error { - start := time.Now() - scheduler := getSchedulerDeployment(cfg, volumes, volumeMounts) - if _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(&scheduler); err != nil { - return fmt.Errorf("failed to create self-hosted %q deployment [%v]", kubeScheduler, err) - } - - waitForPodsWithLabel(client, "self-hosted-"+kubeScheduler, true) - - schedulerStaticManifestPath := buildStaticManifestFilepath(kubeScheduler) - if err := os.RemoveAll(schedulerStaticManifestPath); err != nil { - return fmt.Errorf("unable to delete temporary scheduler manifest [%v]", err) - } - - fmt.Printf("[self-hosted] self-hosted kube-scheduler ready after %f seconds\n", time.Since(start).Seconds()) - return nil -} - -// waitForPodsWithLabel will lookup pods with the given label and wait until they are all -// reporting status as running. -func waitForPodsWithLabel(client *clientset.Clientset, appLabel string, mustBeRunning bool) { - wait.PollInfinite(kubeadmconstants.APICallRetryInterval, func() (bool, error) { - // TODO: Do we need a stronger label link than this? 
- listOpts := metav1.ListOptions{LabelSelector: fmt.Sprintf("k8s-app=%s", appLabel)} - apiPods, err := client.Pods(metav1.NamespaceSystem).List(listOpts) - if err != nil { - fmt.Printf("[self-hosted] error getting %s pods [%v]\n", appLabel, err) - return false, nil - } - fmt.Printf("[self-hosted] Found %d %s pods\n", len(apiPods.Items), appLabel) - - // TODO: HA - if int32(len(apiPods.Items)) != 1 { - return false, nil - } - for _, pod := range apiPods.Items { - fmt.Printf("[self-hosted] Pod %s status: %s\n", pod.Name, pod.Status.Phase) - if mustBeRunning && pod.Status.Phase != "Running" { - return false, nil - } - } - - return true, nil - }) -} - -// Sources from bootkube templates.go -func getAPIServerDS(cfg *kubeadmapi.MasterConfiguration, volumes []v1.Volume, volumeMounts []v1.VolumeMount, kubeVersion *version.Version) ext.DaemonSet { - ds := ext.DaemonSet{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "extensions/v1beta1", - Kind: "DaemonSet", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "self-hosted-" + kubeAPIServer, - Namespace: "kube-system", - Labels: map[string]string{"k8s-app": "self-hosted-" + kubeAPIServer}, - }, - Spec: ext.DaemonSetSpec{ - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "k8s-app": "self-hosted-" + kubeAPIServer, - "component": kubeAPIServer, - "tier": "control-plane", - }, - }, - Spec: v1.PodSpec{ - NodeSelector: map[string]string{kubeadmconstants.LabelNodeRoleMaster: ""}, - HostNetwork: true, - Volumes: volumes, - Containers: []v1.Container{ - { - Name: "self-hosted-" + kubeAPIServer, - Image: images.GetCoreImage(images.KubeAPIServerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage), - Command: getAPIServerCommand(cfg, true, kubeVersion), - Env: getSelfHostedAPIServerEnv(), - VolumeMounts: volumeMounts, - LivenessProbe: componentProbe(6443, "/healthz", v1.URISchemeHTTPS), - Resources: componentResources("250m"), - }, - }, - Tolerations: 
[]v1.Toleration{kubeadmconstants.MasterToleration}, - DNSPolicy: v1.DNSClusterFirstWithHostNet, - }, - }, - }, - } - return ds -} - -func getControllerManagerDeployment(cfg *kubeadmapi.MasterConfiguration, volumes []v1.Volume, volumeMounts []v1.VolumeMount, kubeVersion *version.Version) ext.Deployment { - d := ext.Deployment{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "extensions/v1beta1", - Kind: "Deployment", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "self-hosted-" + kubeControllerManager, - Namespace: "kube-system", - Labels: map[string]string{"k8s-app": "self-hosted-" + kubeControllerManager}, - }, - Spec: ext.DeploymentSpec{ - // TODO bootkube uses 2 replicas - Strategy: ext.DeploymentStrategy{ - Type: ext.RollingUpdateDeploymentStrategyType, - RollingUpdate: &ext.RollingUpdateDeployment{ - MaxUnavailable: &maxUnavailable, - MaxSurge: &maxSurge, - }, - }, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "k8s-app": "self-hosted-" + kubeControllerManager, - "component": kubeControllerManager, - "tier": "control-plane", - }, - }, - Spec: v1.PodSpec{ - NodeSelector: map[string]string{kubeadmconstants.LabelNodeRoleMaster: ""}, - HostNetwork: true, - Volumes: volumes, - Containers: []v1.Container{ - { - Name: "self-hosted-" + kubeControllerManager, - Image: images.GetCoreImage(images.KubeControllerManagerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage), - Command: getControllerManagerCommand(cfg, true, kubeVersion), - VolumeMounts: volumeMounts, - LivenessProbe: componentProbe(10252, "/healthz", v1.URISchemeHTTP), - Resources: componentResources("200m"), - Env: getProxyEnvVars(), - }, - }, - Tolerations: []v1.Toleration{kubeadmconstants.MasterToleration}, - DNSPolicy: v1.DNSClusterFirstWithHostNet, - }, - }, - }, - } - return d -} - -func getSchedulerDeployment(cfg *kubeadmapi.MasterConfiguration, volumes []v1.Volume, volumeMounts []v1.VolumeMount) ext.Deployment { - d := ext.Deployment{ - TypeMeta: 
metav1.TypeMeta{ - APIVersion: "extensions/v1beta1", - Kind: "Deployment", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "self-hosted-" + kubeScheduler, - Namespace: "kube-system", - Labels: map[string]string{"k8s-app": "self-hosted-" + kubeScheduler}, - }, - Spec: ext.DeploymentSpec{ - // TODO bootkube uses 2 replicas - Strategy: ext.DeploymentStrategy{ - Type: ext.RollingUpdateDeploymentStrategyType, - RollingUpdate: &ext.RollingUpdateDeployment{ - MaxUnavailable: &maxUnavailable, - MaxSurge: &maxSurge, - }, - }, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "k8s-app": "self-hosted-" + kubeScheduler, - "component": kubeScheduler, - "tier": "control-plane", - }, - }, - Spec: v1.PodSpec{ - NodeSelector: map[string]string{kubeadmconstants.LabelNodeRoleMaster: ""}, - HostNetwork: true, - Volumes: volumes, - Containers: []v1.Container{ - { - Name: "self-hosted-" + kubeScheduler, - Image: images.GetCoreImage(images.KubeSchedulerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage), - Command: getSchedulerCommand(cfg, true), - VolumeMounts: volumeMounts, - LivenessProbe: componentProbe(10251, "/healthz", v1.URISchemeHTTP), - Resources: componentResources("100m"), - Env: getProxyEnvVars(), - }, - }, - Tolerations: []v1.Toleration{kubeadmconstants.MasterToleration}, - DNSPolicy: v1.DNSClusterFirstWithHostNet, - }, - }, - }, - } - - return d -} - -func buildStaticManifestFilepath(name string) string { - return filepath.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeadmconstants.ManifestsSubDirName, name+".yaml") -} diff --git a/cmd/kubeadm/app/phases/addons/BUILD b/cmd/kubeadm/app/phases/addons/BUILD index ef170f28cc7..65a295a2e9d 100644 --- a/cmd/kubeadm/app/phases/addons/BUILD +++ b/cmd/kubeadm/app/phases/addons/BUILD @@ -21,6 +21,7 @@ go_library( "//cmd/kubeadm/app/images:go_default_library", "//cmd/kubeadm/app/util:go_default_library", "//pkg/api:go_default_library", + 
"//plugin/pkg/scheduler/algorithm:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/cmd/kubeadm/app/phases/addons/addons.go b/cmd/kubeadm/app/phases/addons/addons.go index 4963f2f65f4..dd78e2e7378 100644 --- a/cmd/kubeadm/app/phases/addons/addons.go +++ b/cmd/kubeadm/app/phases/addons/addons.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/images" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" ) // CreateEssentialAddons creates the kube-proxy and kube-dns addons @@ -44,17 +45,18 @@ func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientse return fmt.Errorf("error when parsing kube-proxy configmap template: %v", err) } - proxyDaemonSetBytes, err := kubeadmutil.ParseTemplate(KubeProxyDaemonSet, struct{ Image, ClusterCIDR, MasterTaintKey string }{ - Image: images.GetCoreImage("proxy", cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage), + proxyDaemonSetBytes, err := kubeadmutil.ParseTemplate(KubeProxyDaemonSet, struct{ Image, ClusterCIDR, MasterTaintKey, CloudTaintKey string }{ + Image: images.GetCoreImage("proxy", cfg, cfg.UnifiedControlPlaneImage), ClusterCIDR: getClusterCIDR(cfg.Networking.PodSubnet), MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster, + CloudTaintKey: algorithm.TaintExternalCloudProvider, }) if err != nil { return fmt.Errorf("error when parsing kube-proxy daemonset template: %v", err) } dnsDeploymentBytes, err := kubeadmutil.ParseTemplate(KubeDNSDeployment, struct{ ImageRepository, Arch, Version, DNSDomain, MasterTaintKey string }{ - ImageRepository: kubeadmapi.GlobalEnvParams.RepositoryPrefix, + ImageRepository: cfg.ImageRepository, Arch: runtime.GOARCH, Version: KubeDNSVersion, DNSDomain: cfg.Networking.DNSDomain, @@ -110,7 +112,6 @@ func 
CreateKubeProxyAddon(configMapBytes, daemonSetbytes []byte, client *clients if err := kuberuntime.DecodeInto(api.Codecs.UniversalDecoder(), daemonSetbytes, kubeproxyDaemonSet); err != nil { return fmt.Errorf("unable to decode kube-proxy daemonset %v", err) } - kubeproxyDaemonSet.Spec.Template.Spec.Tolerations = []v1.Toleration{kubeadmconstants.MasterToleration} if _, err := client.ExtensionsV1beta1().DaemonSets(metav1.NamespaceSystem).Create(kubeproxyDaemonSet); err != nil { if !apierrors.IsAlreadyExists(err) { @@ -129,13 +130,6 @@ func CreateKubeDNSAddon(deploymentBytes, serviceBytes []byte, client *clientset. if err := kuberuntime.DecodeInto(api.Codecs.UniversalDecoder(), deploymentBytes, kubednsDeployment); err != nil { return fmt.Errorf("unable to decode kube-dns deployment %v", err) } - kubednsDeployment.Spec.Template.Spec.Tolerations = []v1.Toleration{ - kubeadmconstants.MasterToleration, - { - Key: "CriticalAddonsOnly", - Operator: "Exists", - }, - } if _, err := client.ExtensionsV1beta1().Deployments(metav1.NamespaceSystem).Create(kubednsDeployment); err != nil { if !apierrors.IsAlreadyExists(err) { diff --git a/cmd/kubeadm/app/phases/addons/addons_test.go b/cmd/kubeadm/app/phases/addons/addons_test.go index f1189859c21..82227632267 100644 --- a/cmd/kubeadm/app/phases/addons/addons_test.go +++ b/cmd/kubeadm/app/phases/addons/addons_test.go @@ -54,10 +54,11 @@ func TestCompileManifests(t *testing.T) { }, { manifest: KubeProxyDaemonSet, - data: struct{ Image, ClusterCIDR, MasterTaintKey string }{ + data: struct{ Image, ClusterCIDR, MasterTaintKey, CloudTaintKey string }{ Image: "foo", ClusterCIDR: "foo", MasterTaintKey: "foo", + CloudTaintKey: "foo", }, expected: true, }, diff --git a/cmd/kubeadm/app/phases/addons/manifests.go b/cmd/kubeadm/app/phases/addons/manifests.go index 27be7137892..339fbd83b14 100644 --- a/cmd/kubeadm/app/phases/addons/manifests.go +++ b/cmd/kubeadm/app/phases/addons/manifests.go @@ -85,10 +85,12 @@ spec: readOnly: false hostNetwork: 
true serviceAccountName: kube-proxy - # TODO: Why doesn't the Decoder recognize this new field and decode it properly? Right now it's ignored - # tolerations: - # - key: {{ .MasterTaintKey }} - # effect: NoSchedule + tolerations: + - key: {{ .MasterTaintKey }} + effect: NoSchedule + - key: {{ .CloudTaintKey }} + value: "true" + effect: NoSchedule volumes: - name: kube-proxy configMap: @@ -101,7 +103,6 @@ spec: KubeDNSVersion = "1.14.4" KubeDNSDeployment = ` - apiVersion: extensions/v1beta1 kind: Deployment metadata: @@ -125,8 +126,6 @@ spec: metadata: labels: k8s-app: kube-dns - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' spec: volumes: - name: kube-dns-config @@ -252,12 +251,11 @@ spec: cpu: 10m dnsPolicy: Default # Don't use cluster DNS. serviceAccountName: kube-dns - # TODO: Why doesn't the Decoder recognize this new field and decode it properly? Right now it's ignored - # tolerations: - # - key: CriticalAddonsOnly - # operator: Exists - # - key: {{ .MasterTaintKey }} - # effect: NoSchedule + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: {{ .MasterTaintKey }} + effect: NoSchedule # TODO: Remove this affinity field as soon as we are using manifest lists affinity: nodeAffinity: diff --git a/cmd/kubeadm/app/phases/apiconfig/BUILD b/cmd/kubeadm/app/phases/apiconfig/BUILD index a36f6b63036..ed667728dc4 100644 --- a/cmd/kubeadm/app/phases/apiconfig/BUILD +++ b/cmd/kubeadm/app/phases/apiconfig/BUILD @@ -45,7 +45,6 @@ go_library( "//pkg/apis/rbac/v1beta1:go_default_library", "//pkg/bootstrap/api:go_default_library", "//pkg/kubelet/apis:go_default_library", - "//pkg/util/node:go_default_library", "//pkg/util/version:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", diff --git a/cmd/kubeadm/app/phases/apiconfig/clusterroles.go b/cmd/kubeadm/app/phases/apiconfig/clusterroles.go index 64d624a3b17..91a7b28b392 100644 --- 
a/cmd/kubeadm/app/phases/apiconfig/clusterroles.go +++ b/cmd/kubeadm/app/phases/apiconfig/clusterroles.go @@ -105,7 +105,7 @@ func createRoles(clientset *clientset.Clientset) error { Namespace: metav1.NamespacePublic, }, Rules: []rbac.PolicyRule{ - rbachelper.NewRule("get").Groups("").Resources("configmaps").RuleOrDie(), + rbachelper.NewRule("get").Groups("").Resources("configmaps").Names("cluster-info").RuleOrDie(), }, }, } diff --git a/cmd/kubeadm/app/phases/apiconfig/setupmaster.go b/cmd/kubeadm/app/phases/apiconfig/setupmaster.go index ac7387a2920..2c69fff8117 100644 --- a/cmd/kubeadm/app/phases/apiconfig/setupmaster.go +++ b/cmd/kubeadm/app/phases/apiconfig/setupmaster.go @@ -30,20 +30,19 @@ import ( clientset "k8s.io/client-go/kubernetes" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" - "k8s.io/kubernetes/pkg/util/node" ) const apiCallRetryInterval = 500 * time.Millisecond // TODO: Can we think of any unit tests here? Or should this code just be covered through integration/e2e tests? 
-func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset) error { +func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset, nodeName string) error { var n *v1.Node // Wait for current node registration wait.PollInfinite(kubeadmconstants.APICallRetryInterval, func() (bool, error) { var err error - if n, err = client.Nodes().Get(node.GetHostname(""), metav1.GetOptions{}); err != nil { + if n, err = client.Nodes().Get(nodeName, metav1.GetOptions{}); err != nil { return false, nil } // The node may appear to have no labels at first, @@ -75,7 +74,7 @@ func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset) error if apierrs.IsConflict(err) { fmt.Println("[apiclient] Temporarily unable to update master node metadata due to conflict (will retry)") time.Sleep(apiCallRetryInterval) - attemptToUpdateMasterRoleLabelsAndTaints(client) + attemptToUpdateMasterRoleLabelsAndTaints(client, nodeName) } else { return err } @@ -95,9 +94,9 @@ func addTaintIfNotExists(n *v1.Node, t v1.Taint) { } // UpdateMasterRoleLabelsAndTaints taints the master and sets the master label -func UpdateMasterRoleLabelsAndTaints(client *clientset.Clientset) error { +func UpdateMasterRoleLabelsAndTaints(client *clientset.Clientset, nodeName string) error { // TODO: Use iterate instead of recursion - err := attemptToUpdateMasterRoleLabelsAndTaints(client) + err := attemptToUpdateMasterRoleLabelsAndTaints(client, nodeName) if err != nil { return fmt.Errorf("failed to update master node - [%v]", err) } diff --git a/cmd/kubeadm/app/phases/apiconfig/setupmaster_test.go b/cmd/kubeadm/app/phases/apiconfig/setupmaster_test.go index e813018088e..c1678a247b1 100644 --- a/cmd/kubeadm/app/phases/apiconfig/setupmaster_test.go +++ b/cmd/kubeadm/app/phases/apiconfig/setupmaster_test.go @@ -130,7 +130,7 @@ func TestUpdateMasterRoleLabelsAndTaints(t *testing.T) { t.Fatalf("UpdateMasterRoleLabelsAndTaints(%s): unexpected error building clientset: %v", tc.name, err) } - 
err = UpdateMasterRoleLabelsAndTaints(cs) + err = UpdateMasterRoleLabelsAndTaints(cs, hostname) if err != nil { t.Errorf("UpdateMasterRoleLabelsAndTaints(%s) returned unexpected error: %v", tc.name, err) } diff --git a/cmd/kubeadm/app/phases/certs/BUILD b/cmd/kubeadm/app/phases/certs/BUILD index 97ceec717b4..cc99563df8a 100644 --- a/cmd/kubeadm/app/phases/certs/BUILD +++ b/cmd/kubeadm/app/phases/certs/BUILD @@ -15,7 +15,7 @@ go_test( tags = ["automanaged"], deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", - "//vendor/k8s.io/client-go/util/cert:go_default_library", + "//cmd/kubeadm/app/constants:go_default_library", ], ) @@ -31,7 +31,6 @@ go_library( "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//vendor/k8s.io/client-go/util/cert:go_default_library", ], diff --git a/cmd/kubeadm/app/phases/certs/certs.go b/cmd/kubeadm/app/phases/certs/certs.go index a19f7e60452..0fc6b5b27fa 100644 --- a/cmd/kubeadm/app/phases/certs/certs.go +++ b/cmd/kubeadm/app/phases/certs/certs.go @@ -21,9 +21,7 @@ import ( "crypto/x509" "fmt" "net" - "os" - setutil "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" certutil "k8s.io/client-go/util/cert" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" @@ -32,257 +30,133 @@ import ( "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" ) -// TODO: Integration test cases -// no files exist => create all four files -// valid ca.{crt,key} exists => create apiserver.{crt,key} -// valid ca.{crt,key} and apiserver.{crt,key} exists => do nothing -// invalid ca.{crt,key} exists => error -// only one of the .crt or .key file exists => error +// NewCACertAndKey will generate a self signed CA. 
+func NewCACertAndKey() (*x509.Certificate, *rsa.PrivateKey, error) { -// CreatePKIAssets will create and write to disk all PKI assets necessary to establish the control plane. -// It generates a self-signed CA certificate and a server certificate (signed by the CA) -func CreatePKIAssets(cfg *kubeadmapi.MasterConfiguration) error { - pkiDir := cfg.CertificatesDir - hostname, err := os.Hostname() + caCert, caKey, err := pkiutil.NewCertificateAuthority() if err != nil { - return fmt.Errorf("couldn't get the hostname: %v", err) + return nil, nil, fmt.Errorf("failure while generating CA certificate and key: %v", err) } + return caCert, caKey, nil +} + +// NewAPIServerCertAndKey generate CA certificate for apiserver, signed by the given CA. +func NewAPIServerCertAndKey(cfg *kubeadmapi.MasterConfiguration, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) { + + altNames, err := getAltNames(cfg) + if err != nil { + return nil, nil, fmt.Errorf("failure while composing altnames for API server: %v", err) + } + + config := certutil.Config{ + CommonName: kubeadmconstants.APIServerCertCommonName, + AltNames: *altNames, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + } + apiCert, apiKey, err := pkiutil.NewCertAndKey(caCert, caKey, config) + if err != nil { + return nil, nil, fmt.Errorf("failure while creating API server key and certificate: %v", err) + } + + return apiCert, apiKey, nil +} + +// NewAPIServerKubeletClientCertAndKey generate CA certificate for the apiservers to connect to the kubelets securely, signed by the given CA. 
+func NewAPIServerKubeletClientCertAndKey(caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) { + + config := certutil.Config{ + CommonName: kubeadmconstants.APIServerKubeletClientCertCommonName, + Organization: []string{kubeadmconstants.MastersGroup}, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } + apiClientCert, apiClientKey, err := pkiutil.NewCertAndKey(caCert, caKey, config) + if err != nil { + return nil, nil, fmt.Errorf("failure while creating API server kubelet client key and certificate: %v", err) + } + + return apiClientCert, apiClientKey, nil +} + +// NewServiceAccountSigningKey generate public/private key pairs for signing service account tokens. +func NewServiceAccountSigningKey() (*rsa.PrivateKey, error) { + + // The key does NOT exist, let's generate it now + saSigningKey, err := certutil.NewPrivateKey() + if err != nil { + return nil, fmt.Errorf("failure while creating service account token signing key: %v", err) + } + + return saSigningKey, nil +} + +// NewFrontProxyCACertAndKey generate a self signed front proxy CA. +// Front proxy CA and client certs are used to secure a front proxy authenticator which is used to assert identity +// without the client cert. +// This is a separate CA, so that front proxy identities cannot hit the API and normal client certs cannot be used +// as front proxies. +func NewFrontProxyCACertAndKey() (*x509.Certificate, *rsa.PrivateKey, error) { + + frontProxyCACert, frontProxyCAKey, err := pkiutil.NewCertificateAuthority() + if err != nil { + return nil, nil, fmt.Errorf("failure while generating front-proxy CA certificate and key: %v", err) + } + + return frontProxyCACert, frontProxyCAKey, nil +} + +// NewFrontProxyClientCertAndKey generate CA certificate for proxy server client, signed by the given front proxy CA.
+func NewFrontProxyClientCertAndKey(frontProxyCACert *x509.Certificate, frontProxyCAKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) { + + config := certutil.Config{ + CommonName: kubeadmconstants.FrontProxyClientCertCommonName, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } + frontProxyClientCert, frontProxyClientKey, err := pkiutil.NewCertAndKey(frontProxyCACert, frontProxyCAKey, config) + if err != nil { + return nil, nil, fmt.Errorf("failure while creating front-proxy client key and certificate: %v", err) + } + + return frontProxyClientCert, frontProxyClientKey, nil +} + +// getAltNames builds an AltNames object for to be used when generating apiserver certificate +func getAltNames(cfg *kubeadmapi.MasterConfiguration) (*certutil.AltNames, error) { + + // advertise address + advertiseAddress := net.ParseIP(cfg.API.AdvertiseAddress) + if advertiseAddress == nil { + return nil, fmt.Errorf("error parsing API AdvertiseAddress %v: is not a valid textual representation of an IP address", cfg.API.AdvertiseAddress) + } + + // internal IP address for the API server _, svcSubnet, err := net.ParseCIDR(cfg.Networking.ServiceSubnet) if err != nil { - return fmt.Errorf("error parsing CIDR %q: %v", cfg.Networking.ServiceSubnet, err) + return nil, fmt.Errorf("error parsing CIDR %q: %v", cfg.Networking.ServiceSubnet, err) } - // Build the list of SANs - altNames := getAltNames(cfg.APIServerCertSANs, hostname, cfg.Networking.DNSDomain, svcSubnet) - // Append the address the API Server is advertising - altNames.IPs = append(altNames.IPs, net.ParseIP(cfg.API.AdvertiseAddress)) - - var caCert *x509.Certificate - var caKey *rsa.PrivateKey - // If at least one of them exists, we should try to load them - // In the case that only one exists, there will most likely be an error anyway - if pkiutil.CertOrKeyExist(pkiDir, kubeadmconstants.CACertAndKeyBaseName) { - // Try to load ca.crt and ca.key from the PKI directory - caCert, caKey, err = 
pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, kubeadmconstants.CACertAndKeyBaseName) - if err != nil || caCert == nil || caKey == nil { - return fmt.Errorf("certificate and/or key existed but they could not be loaded properly") - } - - // The certificate and key could be loaded, but the certificate is not a CA - if !caCert.IsCA { - return fmt.Errorf("certificate and key could be loaded but the certificate is not a CA") - } - - fmt.Println("[certificates] Using the existing CA certificate and key.") - } else { - // The certificate and the key did NOT exist, let's generate them now - caCert, caKey, err = pkiutil.NewCertificateAuthority() - if err != nil { - return fmt.Errorf("failure while generating CA certificate and key [%v]", err) - } - - if err = pkiutil.WriteCertAndKey(pkiDir, kubeadmconstants.CACertAndKeyBaseName, caCert, caKey); err != nil { - return fmt.Errorf("failure while saving CA certificate and key [%v]", err) - } - fmt.Println("[certificates] Generated CA certificate and key.") + internalAPIServerVirtualIP, err := ipallocator.GetIndexedIP(svcSubnet, 1) + if err != nil { + return nil, fmt.Errorf("unable to get first IP address from the given CIDR (%s): %v", svcSubnet.String(), err) } - // If at least one of them exists, we should try to load them - // In the case that only one exists, there will most likely be an error anyway - if pkiutil.CertOrKeyExist(pkiDir, kubeadmconstants.APIServerCertAndKeyBaseName) { - // Try to load apiserver.crt and apiserver.key from the PKI directory - apiCert, apiKey, err := pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, kubeadmconstants.APIServerCertAndKeyBaseName) - if err != nil || apiCert == nil || apiKey == nil { - return fmt.Errorf("certificate and/or key existed but they could not be loaded properly") - } - - fmt.Println("[certificates] Using the existing API Server certificate and key.") - } else { - // The certificate and the key did NOT exist, let's generate them now - // TODO: Add a test case to verify that this cert has 
the x509.ExtKeyUsageServerAuth flag - config := certutil.Config{ - CommonName: "kube-apiserver", - AltNames: altNames, - Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - } - apiCert, apiKey, err := pkiutil.NewCertAndKey(caCert, caKey, config) - if err != nil { - return fmt.Errorf("failure while creating API server key and certificate [%v]", err) - } - - if err = pkiutil.WriteCertAndKey(pkiDir, kubeadmconstants.APIServerCertAndKeyBaseName, apiCert, apiKey); err != nil { - return fmt.Errorf("failure while saving API server certificate and key [%v]", err) - } - fmt.Println("[certificates] Generated API server certificate and key.") - fmt.Printf("[certificates] API Server serving cert is signed for DNS names %v and IPs %v\n", altNames.DNSNames, altNames.IPs) - } - - // If at least one of them exists, we should try to load them - // In the case that only one exists, there will most likely be an error anyway - if pkiutil.CertOrKeyExist(pkiDir, kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName) { - // Try to load apiserver-kubelet-client.crt and apiserver-kubelet-client.key from the PKI directory - apiCert, apiKey, err := pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName) - if err != nil || apiCert == nil || apiKey == nil { - return fmt.Errorf("certificate and/or key existed but they could not be loaded properly") - } - - fmt.Println("[certificates] Using the existing API Server kubelet client certificate and key.") - } else { - // The certificate and the key did NOT exist, let's generate them now - // TODO: Add a test case to verify that this cert has the x509.ExtKeyUsageClientAuth flag - config := certutil.Config{ - CommonName: "kube-apiserver-kubelet-client", - Organization: []string{kubeadmconstants.MastersGroup}, - Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - } - apiClientCert, apiClientKey, err := pkiutil.NewCertAndKey(caCert, caKey, config) - if err != nil { - return fmt.Errorf("failure 
while creating API server kubelet client key and certificate [%v]", err) - } - - if err = pkiutil.WriteCertAndKey(pkiDir, kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName, apiClientCert, apiClientKey); err != nil { - return fmt.Errorf("failure while saving API server kubelet client certificate and key [%v]", err) - } - fmt.Println("[certificates] Generated API server kubelet client certificate and key.") - } - - // If the key exists, we should try to load it - if pkiutil.CertOrKeyExist(pkiDir, kubeadmconstants.ServiceAccountKeyBaseName) { - // Try to load sa.key from the PKI directory - _, err := pkiutil.TryLoadKeyFromDisk(pkiDir, kubeadmconstants.ServiceAccountKeyBaseName) - if err != nil { - return fmt.Errorf("certificate and/or key existed but they could not be loaded properly [%v]", err) - } - - fmt.Println("[certificates] Using the existing service account token signing key.") - } else { - // The key does NOT exist, let's generate it now - saTokenSigningKey, err := certutil.NewPrivateKey() - if err != nil { - return fmt.Errorf("failure while creating service account token signing key [%v]", err) - } - - if err = pkiutil.WriteKey(pkiDir, kubeadmconstants.ServiceAccountKeyBaseName, saTokenSigningKey); err != nil { - return fmt.Errorf("failure while saving service account token signing key [%v]", err) - } - - if err = pkiutil.WritePublicKey(pkiDir, kubeadmconstants.ServiceAccountKeyBaseName, &saTokenSigningKey.PublicKey); err != nil { - return fmt.Errorf("failure while saving service account token signing public key [%v]", err) - } - fmt.Println("[certificates] Generated service account token signing key and public key.") - } - - // front proxy CA and client certs are used to secure a front proxy authenticator which is used to assert identity - // without the client cert, you cannot make use of the front proxy and the kube-aggregator uses this connection - // so we generate and enable it unconditionally - // This is a separte CA, so that front proxy 
identities cannot hit the API and normal client certs cannot be used - // as front proxies. - var frontProxyCACert *x509.Certificate - var frontProxyCAKey *rsa.PrivateKey - // If at least one of them exists, we should try to load them - // In the case that only one exists, there will most likely be an error anyway - if pkiutil.CertOrKeyExist(pkiDir, kubeadmconstants.FrontProxyCACertAndKeyBaseName) { - // Try to load front-proxy-ca.crt and front-proxy-ca.key from the PKI directory - frontProxyCACert, frontProxyCAKey, err = pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, kubeadmconstants.FrontProxyCACertAndKeyBaseName) - if err != nil || frontProxyCACert == nil || frontProxyCAKey == nil { - return fmt.Errorf("certificate and/or key existed but they could not be loaded properly") - } - - // The certificate and key could be loaded, but the certificate is not a CA - if !frontProxyCACert.IsCA { - return fmt.Errorf("certificate and key could be loaded but the certificate is not a CA") - } - - fmt.Println("[certificates] Using the existing front-proxy CA certificate and key.") - } else { - // The certificate and the key did NOT exist, let's generate them now - frontProxyCACert, frontProxyCAKey, err = pkiutil.NewCertificateAuthority() - if err != nil { - return fmt.Errorf("failure while generating front-proxy CA certificate and key [%v]", err) - } - - if err = pkiutil.WriteCertAndKey(pkiDir, kubeadmconstants.FrontProxyCACertAndKeyBaseName, frontProxyCACert, frontProxyCAKey); err != nil { - return fmt.Errorf("failure while saving front-proxy CA certificate and key [%v]", err) - } - fmt.Println("[certificates] Generated front-proxy CA certificate and key.") - } - - // At this point we have a front proxy CA signing key. We can use that create the front proxy client cert if - // it doesn't already exist. 
- // If at least one of them exists, we should try to load them - // In the case that only one exists, there will most likely be an error anyway - if pkiutil.CertOrKeyExist(pkiDir, kubeadmconstants.FrontProxyClientCertAndKeyBaseName) { - // Try to load apiserver-kubelet-client.crt and apiserver-kubelet-client.key from the PKI directory - apiCert, apiKey, err := pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, kubeadmconstants.FrontProxyClientCertAndKeyBaseName) - if err != nil || apiCert == nil || apiKey == nil { - return fmt.Errorf("certificate and/or key existed but they could not be loaded properly") - } - - fmt.Println("[certificates] Using the existing front-proxy client certificate and key.") - } else { - // The certificate and the key did NOT exist, let's generate them now - // TODO: Add a test case to verify that this cert has the x509.ExtKeyUsageClientAuth flag - config := certutil.Config{ - CommonName: "front-proxy-client", - Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - } - apiClientCert, apiClientKey, err := pkiutil.NewCertAndKey(frontProxyCACert, frontProxyCAKey, config) - if err != nil { - return fmt.Errorf("failure while creating front-proxy client key and certificate [%v]", err) - } - - if err = pkiutil.WriteCertAndKey(pkiDir, kubeadmconstants.FrontProxyClientCertAndKeyBaseName, apiClientCert, apiClientKey); err != nil { - return fmt.Errorf("failure while saving front-proxy client certificate and key [%v]", err) - } - fmt.Println("[certificates] Generated front-proxy client certificate and key.") - } - - fmt.Printf("[certificates] Valid certificates and keys now exist in %q\n", pkiDir) - - return nil -} - -// checkAltNamesExist verifies that the cert is valid for all IPs and DNS names it should be valid for -func checkAltNamesExist(IPs []net.IP, DNSNames []string, altNames certutil.AltNames) bool { - dnsset := setutil.NewString(DNSNames...) 
- - for _, dnsNameThatShouldExist := range altNames.DNSNames { - if !dnsset.Has(dnsNameThatShouldExist) { - return false - } - } - - for _, ipThatShouldExist := range altNames.IPs { - found := false - for _, ip := range IPs { - if ip.Equal(ipThatShouldExist) { - found = true - break - } - } - - if !found { - return false - } - } - return true -} - -// getAltNames builds an AltNames object for the certutil to use when generating the certificates -func getAltNames(cfgAltNames []string, hostname, dnsdomain string, svcSubnet *net.IPNet) certutil.AltNames { - altNames := certutil.AltNames{ + // create AltNames with defaults DNSNames/IPs + altNames := &certutil.AltNames{ DNSNames: []string{ - hostname, + cfg.NodeName, "kubernetes", "kubernetes.default", "kubernetes.default.svc", - fmt.Sprintf("kubernetes.default.svc.%s", dnsdomain), + fmt.Sprintf("kubernetes.default.svc.%s", cfg.Networking.DNSDomain), + }, + IPs: []net.IP{ + internalAPIServerVirtualIP, + advertiseAddress, }, } - // Populate IPs/DNSNames from AltNames - for _, altname := range cfgAltNames { + // adds additional SAN + for _, altname := range cfg.APIServerCertSANs { if ip := net.ParseIP(altname); ip != nil { altNames.IPs = append(altNames.IPs, ip) } else if len(validation.IsDNS1123Subdomain(altname)) == 0 { @@ -290,11 +164,5 @@ func getAltNames(cfgAltNames []string, hostname, dnsdomain string, svcSubnet *ne } } - // and lastly, extract the internal IP address for the API server - internalAPIServerVirtualIP, err := ipallocator.GetIndexedIP(svcSubnet, 1) - if err != nil { - fmt.Printf("[certs] WARNING: Unable to get first IP address from the given CIDR (%s): %v\n", svcSubnet.String(), err) - } - altNames.IPs = append(altNames.IPs, internalAPIServerVirtualIP) - return altNames + return altNames, nil } diff --git a/cmd/kubeadm/app/phases/certs/certs_test.go b/cmd/kubeadm/app/phases/certs/certs_test.go index 8f6bdbaed0e..d4f03ce6971 100644 --- a/cmd/kubeadm/app/phases/certs/certs_test.go +++ 
b/cmd/kubeadm/app/phases/certs/certs_test.go @@ -17,158 +17,149 @@ limitations under the License. package certs import ( - "fmt" - "io/ioutil" + "crypto/x509" "net" - "os" "testing" - certutil "k8s.io/client-go/util/cert" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" ) -func TestCreatePKIAssets(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "") +func TestNewCACertAndKey(t *testing.T) { + caCert, _, err := NewCACertAndKey() if err != nil { - t.Fatalf("Couldn't create tmpdir") + t.Fatalf("failed call NewCACertAndKey: %v", err) } - defer os.RemoveAll(tmpdir) - var tests = []struct { - cfg *kubeadmapi.MasterConfiguration - expected bool - }{ - { - cfg: &kubeadmapi.MasterConfiguration{}, - expected: false, - }, - { - // CIDR too small - cfg: &kubeadmapi.MasterConfiguration{ - API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"}, - Networking: kubeadmapi.Networking{ServiceSubnet: "10.0.0.1/1"}, - CertificatesDir: fmt.Sprintf("%s/etc/kubernetes/pki", tmpdir), - }, - expected: false, - }, - { - // CIDR invalid - cfg: &kubeadmapi.MasterConfiguration{ - API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"}, - Networking: kubeadmapi.Networking{ServiceSubnet: "invalid"}, - CertificatesDir: fmt.Sprintf("%s/etc/kubernetes/pki", tmpdir), - }, - expected: false, - }, - { - cfg: &kubeadmapi.MasterConfiguration{ - API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"}, - Networking: kubeadmapi.Networking{ServiceSubnet: "10.0.0.1/24"}, - CertificatesDir: fmt.Sprintf("%s/etc/kubernetes/pki", tmpdir), - }, - expected: true, - }, + assertIsCa(t, caCert) +} + +func TestNewAPIServerCertAndKey(t *testing.T) { + hostname := "valid-hostname" + + advertiseIP := "1.2.3.4" + cfg := &kubeadmapi.MasterConfiguration{ + API: kubeadmapi.API{AdvertiseAddress: advertiseIP}, + Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, + NodeName: "valid-hostname", } - for _, rt := range tests { - actual := 
CreatePKIAssets(rt.cfg) - if (actual == nil) != rt.expected { - t.Errorf( - "failed CreatePKIAssets with an error:\n\texpected: %t\n\t actual: %t", - rt.expected, - (actual == nil), - ) - } + caCert, caKey, err := NewCACertAndKey() + + apiServerCert, _, err := NewAPIServerCertAndKey(cfg, caCert, caKey) + if err != nil { + t.Fatalf("failed creation of cert and key: %v", err) + } + + assertIsSignedByCa(t, apiServerCert, caCert) + assertHasServerAuth(t, apiServerCert) + + for _, DNSName := range []string{hostname, "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster.local"} { + assertHasDNSNames(t, apiServerCert, DNSName) + } + for _, IPAddress := range []string{"10.96.0.1", advertiseIP} { + assertHasIPAddresses(t, apiServerCert, net.ParseIP(IPAddress)) } } -func TestCheckAltNamesExist(t *testing.T) { - var tests = []struct { - IPs []net.IP - DNSNames []string - requiredAltNames certutil.AltNames - succeed bool - }{ - { - // equal - requiredAltNames: certutil.AltNames{IPs: []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("192.168.1.2")}, DNSNames: []string{"foo", "bar", "baz"}}, - IPs: []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("192.168.1.2")}, - DNSNames: []string{"foo", "bar", "baz"}, - succeed: true, - }, - { - // the loaded cert has more ips than required, ok - requiredAltNames: certutil.AltNames{IPs: []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("192.168.1.2")}, DNSNames: []string{"foo", "bar", "baz"}}, - IPs: []net.IP{net.ParseIP("192.168.2.5"), net.ParseIP("1.1.1.1"), net.ParseIP("192.168.1.2")}, - DNSNames: []string{"a", "foo", "b", "bar", "baz"}, - succeed: true, - }, - { - // the loaded cert doesn't have all ips - requiredAltNames: certutil.AltNames{IPs: []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("192.168.2.5"), net.ParseIP("192.168.1.2")}, DNSNames: []string{"foo", "bar", "baz"}}, - IPs: []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("192.168.1.2")}, - DNSNames: []string{"foo", "bar", "baz"}, - succeed: false, - }, - { 
- // the loaded cert doesn't have all ips - requiredAltNames: certutil.AltNames{IPs: []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("192.168.1.2")}, DNSNames: []string{"foo", "bar", "b", "baz"}}, - IPs: []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("192.168.1.2")}, - DNSNames: []string{"foo", "bar", "baz"}, - succeed: false, - }, +func TestNewAPIServerKubeletClientCertAndKey(t *testing.T) { + caCert, caKey, err := NewCACertAndKey() + + apiClientCert, _, err := NewAPIServerKubeletClientCertAndKey(caCert, caKey) + if err != nil { + t.Fatalf("failed creation of cert and key: %v", err) } - for _, rt := range tests { - succeeded := checkAltNamesExist(rt.IPs, rt.DNSNames, rt.requiredAltNames) - if succeeded != rt.succeed { - t.Errorf( - "failed checkAltNamesExist:\n\texpected: %t\n\t actual: %t", - rt.succeed, - succeeded, - ) - } + assertIsSignedByCa(t, apiClientCert, caCert) + assertHasClientAuth(t, apiClientCert) + assertHasOrganization(t, apiClientCert, constants.MastersGroup) +} + +func TestNewNewServiceAccountSigningKey(t *testing.T) { + + key, err := NewServiceAccountSigningKey() + if err != nil { + t.Fatalf("failed creation of key: %v", err) + } + + if key.N.BitLen() < 2048 { + t.Error("Service account signing key has less than 2048 bits size") } } -func TestGetAltNames(t *testing.T) { - var tests = []struct { - cfgaltnames []string - hostname string - dnsdomain string - servicecidr string - expectedIPs []string - expectedDNSNames []string - }{ - { - cfgaltnames: []string{"foo", "192.168.200.1", "bar.baz"}, - hostname: "my-node", - dnsdomain: "cluster.external", - servicecidr: "10.96.0.1/12", - expectedIPs: []string{"192.168.200.1", "10.96.0.1"}, - expectedDNSNames: []string{"my-node", "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster.external", "foo", "bar.baz"}, - }, +func TestNewFrontProxyCACertAndKey(t *testing.T) { + frontProxyCACert, _, err := NewFrontProxyCACertAndKey() + if err != nil { + t.Fatalf("failed creation 
of cert and key: %v", err) } - for _, rt := range tests { - _, svcSubnet, _ := net.ParseCIDR(rt.servicecidr) - actual := getAltNames(rt.cfgaltnames, rt.hostname, rt.dnsdomain, svcSubnet) - for i := range actual.IPs { - if rt.expectedIPs[i] != actual.IPs[i].String() { - t.Errorf( - "failed getAltNames:\n\texpected: %s\n\t actual: %s", - rt.expectedIPs[i], - actual.IPs[i].String(), - ) - } - } - for i := range actual.DNSNames { - if rt.expectedDNSNames[i] != actual.DNSNames[i] { - t.Errorf( - "failed getAltNames:\n\texpected: %s\n\t actual: %s", - rt.expectedDNSNames[i], - actual.DNSNames[i], - ) - } - } + assertIsCa(t, frontProxyCACert) +} + +func TestNewFrontProxyClientCertAndKey(t *testing.T) { + frontProxyCACert, frontProxyCAKey, err := NewFrontProxyCACertAndKey() + + frontProxyClientCert, _, err := NewFrontProxyClientCertAndKey(frontProxyCACert, frontProxyCAKey) + if err != nil { + t.Fatalf("failed creation of cert and key: %v", err) + } + + assertIsSignedByCa(t, frontProxyClientCert, frontProxyCACert) + assertHasClientAuth(t, frontProxyClientCert) +} + +func assertIsCa(t *testing.T, cert *x509.Certificate) { + if !cert.IsCA { + t.Error("cert is not a valida CA") } } + +func assertIsSignedByCa(t *testing.T, cert *x509.Certificate, ca *x509.Certificate) { + if err := cert.CheckSignatureFrom(ca); err != nil { + t.Error("cert is not signed by ca") + } +} + +func assertHasClientAuth(t *testing.T, cert *x509.Certificate) { + for i := range cert.ExtKeyUsage { + if cert.ExtKeyUsage[i] == x509.ExtKeyUsageClientAuth { + return + } + } + t.Error("cert is not a ClientAuth") +} + +func assertHasServerAuth(t *testing.T, cert *x509.Certificate) { + for i := range cert.ExtKeyUsage { + if cert.ExtKeyUsage[i] == x509.ExtKeyUsageServerAuth { + return + } + } + t.Error("cert is not a ServerAuth") +} + +func assertHasOrganization(t *testing.T, cert *x509.Certificate, OU string) { + for i := range cert.Subject.Organization { + if cert.Subject.Organization[i] == OU { + return + } + } 
+ t.Errorf("cert does not contain OU %s", OU) +} + +func assertHasDNSNames(t *testing.T, cert *x509.Certificate, DNSName string) { + for i := range cert.DNSNames { + if cert.DNSNames[i] == DNSName { + return + } + } + t.Errorf("cert does not contain DNSName %s", DNSName) +} + +func assertHasIPAddresses(t *testing.T, cert *x509.Certificate, IPAddress net.IP) { + for i := range cert.IPAddresses { + if cert.IPAddresses[i].Equal(IPAddress) { + return + } + } + t.Errorf("cert does not contain IPAddress %s", IPAddress) +} diff --git a/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers.go b/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers.go index d9587bf6924..fcfd07b8876 100644 --- a/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers.go +++ b/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers.go @@ -61,6 +61,16 @@ func NewCertAndKey(caCert *x509.Certificate, caKey *rsa.PrivateKey, config certu return cert, key, nil } +// HasServerAuth returns true if the given certificate is a ServerAuth +func HasServerAuth(cert *x509.Certificate) bool { + for i := range cert.ExtKeyUsage { + if cert.ExtKeyUsage[i] == x509.ExtKeyUsageServerAuth { + return true + } + } + return false +} + func WriteCertAndKey(pkiPath string, name string, cert *x509.Certificate, key *rsa.PrivateKey) error { if err := WriteKey(pkiPath, name, key); err != nil { return err diff --git a/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers_test.go b/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers_test.go index 75004cc65f3..34ac26ee14c 100644 --- a/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers_test.go +++ b/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers_test.go @@ -87,6 +87,45 @@ func TestNewCertAndKey(t *testing.T) { } } +func TestHasServerAuth(t *testing.T) { + caCert, caKey, _ := NewCertificateAuthority() + + var tests = []struct { + config certutil.Config + expected bool + }{ + { + config: certutil.Config{ + CommonName: "test", + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }, + expected: true, + }, 
+ { + config: certutil.Config{ + CommonName: "test", + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + }, + expected: false, + }, + } + + for _, rt := range tests { + cert, _, err := NewCertAndKey(caCert, caKey, rt.config) + if err != nil { + t.Fatalf("Couldn't create cert: %v", err) + } + actual := HasServerAuth(cert) + if actual != rt.expected { + t.Errorf( + "failed HasServerAuth:\n\texpected: %t\n\t actual: %t", + rt.expected, + actual, + ) + } + } +} + func TestWriteCertAndKey(t *testing.T) { tmpdir, err := ioutil.TempDir("", "") if err != nil { diff --git a/cmd/kubeadm/app/master/BUILD b/cmd/kubeadm/app/phases/controlplane/BUILD similarity index 82% rename from cmd/kubeadm/app/master/BUILD rename to cmd/kubeadm/app/phases/controlplane/BUILD index e7db78a80e2..83ef59764a3 100644 --- a/cmd/kubeadm/app/master/BUILD +++ b/cmd/kubeadm/app/phases/controlplane/BUILD @@ -8,37 +8,6 @@ load( "go_test", ) -go_library( - name = "go_default_library", - srcs = [ - "apiclient.go", - "manifests.go", - "selfhosted.go", - ], - tags = ["automanaged"], - deps = [ - "//cmd/kubeadm/app/apis/kubeadm:go_default_library", - "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library", - "//cmd/kubeadm/app/constants:go_default_library", - "//cmd/kubeadm/app/images:go_default_library", - "//cmd/kubeadm/app/util/kubeconfig:go_default_library", - "//pkg/bootstrap/api:go_default_library", - "//pkg/kubeapiserver/authorizer/modes:go_default_library", - "//pkg/kubectl/cmd/util:go_default_library", - "//pkg/kubelet/types:go_default_library", - "//pkg/util/version:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - ], -) - go_test( name = "go_default_test", srcs = ["manifests_test.go"], @@ -54,6 +23,28 @@ go_test( ], ) +go_library( + name = "go_default_library", + srcs = ["manifests.go"], + tags = ["automanaged"], + deps = [ + "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library", + "//cmd/kubeadm/app/constants:go_default_library", + "//cmd/kubeadm/app/images:go_default_library", + "//pkg/kubeapiserver/authorizer/modes:go_default_library", + "//pkg/kubectl/cmd/util:go_default_library", + "//pkg/kubelet/types:go_default_library", + "//pkg/util/version:go_default_library", + "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), diff --git a/cmd/kubeadm/app/master/manifests.go b/cmd/kubeadm/app/phases/controlplane/manifests.go similarity index 71% rename from cmd/kubeadm/app/master/manifests.go rename to cmd/kubeadm/app/phases/controlplane/manifests.go index f047b8c082b..12d2bb7cd21 100644 --- a/cmd/kubeadm/app/master/manifests.go +++ b/cmd/kubeadm/app/phases/controlplane/manifests.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package master +package controlplane import ( "bytes" @@ -25,7 +25,7 @@ import ( "github.com/ghodss/yaml" - api "k8s.io/api/core/v1" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -34,7 +34,6 @@ import ( kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/images" - bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api" authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" @@ -48,21 +47,16 @@ const ( defaultv17AdmissionControl = "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota" etcd = "etcd" - apiServer = "apiserver" - controllerManager = "controller-manager" - scheduler = "scheduler" - proxy = "proxy" kubeAPIServer = "kube-apiserver" kubeControllerManager = "kube-controller-manager" kubeScheduler = "kube-scheduler" - kubeProxy = "kube-proxy" ) // WriteStaticPodManifests builds manifest objects based on user provided configuration and then dumps it to disk // where kubelet will pick and schedule them. 
func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error { - volumes := []api.Volume{k8sVolume()} - volumeMounts := []api.VolumeMount{k8sVolumeMount()} + volumes := []v1.Volume{k8sVolume()} + volumeMounts := []v1.VolumeMount{k8sVolumeMount()} if isCertsVolumeMountNeeded() { volumes = append(volumes, certsVolume(cfg)) @@ -85,31 +79,31 @@ func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error { } // Prepare static pod specs - staticPodSpecs := map[string]api.Pod{ - kubeAPIServer: componentPod(api.Container{ + staticPodSpecs := map[string]v1.Pod{ + kubeAPIServer: componentPod(v1.Container{ Name: kubeAPIServer, - Image: images.GetCoreImage(images.KubeAPIServerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage), + Image: images.GetCoreImage(images.KubeAPIServerImage, cfg, cfg.UnifiedControlPlaneImage), Command: getAPIServerCommand(cfg, false, k8sVersion), VolumeMounts: volumeMounts, - LivenessProbe: componentProbe(int(cfg.API.BindPort), "/healthz", api.URISchemeHTTPS), + LivenessProbe: componentProbe(int(cfg.API.BindPort), "/healthz", v1.URISchemeHTTPS), Resources: componentResources("250m"), Env: getProxyEnvVars(), }, volumes...), - kubeControllerManager: componentPod(api.Container{ + kubeControllerManager: componentPod(v1.Container{ Name: kubeControllerManager, - Image: images.GetCoreImage(images.KubeControllerManagerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage), + Image: images.GetCoreImage(images.KubeControllerManagerImage, cfg, cfg.UnifiedControlPlaneImage), Command: getControllerManagerCommand(cfg, false, k8sVersion), VolumeMounts: volumeMounts, - LivenessProbe: componentProbe(10252, "/healthz", api.URISchemeHTTP), + LivenessProbe: componentProbe(10252, "/healthz", v1.URISchemeHTTP), Resources: componentResources("200m"), Env: getProxyEnvVars(), }, volumes...), - kubeScheduler: componentPod(api.Container{ + kubeScheduler: componentPod(v1.Container{ Name: kubeScheduler, - Image: 
images.GetCoreImage(images.KubeSchedulerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage), + Image: images.GetCoreImage(images.KubeSchedulerImage, cfg, cfg.UnifiedControlPlaneImage), Command: getSchedulerCommand(cfg, false), - VolumeMounts: []api.VolumeMount{k8sVolumeMount()}, - LivenessProbe: componentProbe(10251, "/healthz", api.URISchemeHTTP), + VolumeMounts: []v1.VolumeMount{k8sVolumeMount()}, + LivenessProbe: componentProbe(10251, "/healthz", v1.URISchemeHTTP), Resources: componentResources("100m"), Env: getProxyEnvVars(), }, k8sVolume()), @@ -117,16 +111,16 @@ func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error { // Add etcd static pod spec only if external etcd is not configured if len(cfg.Etcd.Endpoints) == 0 { - etcdPod := componentPod(api.Container{ + etcdPod := componentPod(v1.Container{ Name: etcd, Command: getEtcdCommand(cfg), - VolumeMounts: []api.VolumeMount{certsVolumeMount(), etcdVolumeMount(cfg.Etcd.DataDir), k8sVolumeMount()}, - Image: images.GetCoreImage(images.KubeEtcdImage, cfg, kubeadmapi.GlobalEnvParams.EtcdImage), - LivenessProbe: componentProbe(2379, "/health", api.URISchemeHTTP), + VolumeMounts: []v1.VolumeMount{certsVolumeMount(), etcdVolumeMount(cfg.Etcd.DataDir), k8sVolumeMount()}, + Image: images.GetCoreImage(images.KubeEtcdImage, cfg, cfg.Etcd.Image), + LivenessProbe: componentProbe(2379, "/health", v1.URISchemeHTTP), }, certsVolume(cfg), etcdVolume(cfg), k8sVolume()) - etcdPod.Spec.SecurityContext = &api.PodSecurityContext{ - SELinuxOptions: &api.SELinuxOptions{ + etcdPod.Spec.SecurityContext = &v1.PodSecurityContext{ + SELinuxOptions: &v1.SELinuxOptions{ // Unconfine the etcd container so it can write to the data dir with SELinux enforcing: Type: "spc_t", }, @@ -152,34 +146,34 @@ func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error { return nil } -func newVolume(name, path string) api.Volume { - return api.Volume{ +func newVolume(name, path string) v1.Volume { + return v1.Volume{ Name: 
name, - VolumeSource: api.VolumeSource{ - HostPath: &api.HostPathVolumeSource{Path: path}, + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{Path: path}, }, } } -func newVolumeMount(name, path string) api.VolumeMount { - return api.VolumeMount{ +func newVolumeMount(name, path string) v1.VolumeMount { + return v1.VolumeMount{ Name: name, MountPath: path, } } // etcdVolume exposes a path on the host in order to guarantee data survival during reboot. -func etcdVolume(cfg *kubeadmapi.MasterConfiguration) api.Volume { - return api.Volume{ +func etcdVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume { + return v1.Volume{ Name: "etcd", - VolumeSource: api.VolumeSource{ - HostPath: &api.HostPathVolumeSource{Path: cfg.Etcd.DataDir}, + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{Path: cfg.Etcd.DataDir}, }, } } -func etcdVolumeMount(dataDir string) api.VolumeMount { - return api.VolumeMount{ +func etcdVolumeMount(dataDir string) v1.VolumeMount { + return v1.VolumeMount{ Name: "etcd", MountPath: dataDir, } @@ -192,18 +186,18 @@ func isCertsVolumeMountNeeded() bool { } // certsVolume exposes host SSL certificates to pod containers. 
-func certsVolume(cfg *kubeadmapi.MasterConfiguration) api.Volume { - return api.Volume{ +func certsVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume { + return v1.Volume{ Name: "certs", - VolumeSource: api.VolumeSource{ + VolumeSource: v1.VolumeSource{ // TODO(phase1+) make path configurable - HostPath: &api.HostPathVolumeSource{Path: "/etc/ssl/certs"}, + HostPath: &v1.HostPathVolumeSource{Path: "/etc/ssl/certs"}, }, } } -func certsVolumeMount() api.VolumeMount { - return api.VolumeMount{ +func certsVolumeMount() v1.VolumeMount { + return v1.VolumeMount{ Name: "certs", MountPath: "/etc/ssl/certs", } @@ -218,69 +212,52 @@ func isPkiVolumeMountNeeded() bool { return false } -func pkiVolume() api.Volume { - return api.Volume{ +func pkiVolume() v1.Volume { + return v1.Volume{ Name: "pki", - VolumeSource: api.VolumeSource{ + VolumeSource: v1.VolumeSource{ // TODO(phase1+) make path configurable - HostPath: &api.HostPathVolumeSource{Path: "/etc/pki"}, + HostPath: &v1.HostPathVolumeSource{Path: "/etc/pki"}, }, } } -func pkiVolumeMount() api.VolumeMount { - return api.VolumeMount{ +func pkiVolumeMount() v1.VolumeMount { + return v1.VolumeMount{ Name: "pki", MountPath: "/etc/pki", } } -func flockVolume() api.Volume { - return api.Volume{ - Name: "var-lock", - VolumeSource: api.VolumeSource{ - HostPath: &api.HostPathVolumeSource{Path: "/var/lock"}, - }, - } -} - -func flockVolumeMount() api.VolumeMount { - return api.VolumeMount{ - Name: "var-lock", - MountPath: "/var/lock", - ReadOnly: false, - } -} - -func k8sVolume() api.Volume { - return api.Volume{ +func k8sVolume() v1.Volume { + return v1.Volume{ Name: "k8s", - VolumeSource: api.VolumeSource{ - HostPath: &api.HostPathVolumeSource{Path: kubeadmapi.GlobalEnvParams.KubernetesDir}, + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{Path: kubeadmapi.GlobalEnvParams.KubernetesDir}, }, } } -func k8sVolumeMount() api.VolumeMount { - return api.VolumeMount{ +func k8sVolumeMount() v1.VolumeMount { + 
return v1.VolumeMount{ Name: "k8s", MountPath: kubeadmapi.GlobalEnvParams.KubernetesDir, ReadOnly: true, } } -func componentResources(cpu string) api.ResourceRequirements { - return api.ResourceRequirements{ - Requests: api.ResourceList{ - api.ResourceName(api.ResourceCPU): resource.MustParse(cpu), +func componentResources(cpu string) v1.ResourceRequirements { + return v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse(cpu), }, } } -func componentProbe(port int, path string, scheme api.URIScheme) *api.Probe { - return &api.Probe{ - Handler: api.Handler{ - HTTPGet: &api.HTTPGetAction{ +func componentProbe(port int, path string, scheme v1.URIScheme) *v1.Probe { + return &v1.Probe{ + Handler: v1.Handler{ + HTTPGet: &v1.HTTPGetAction{ Host: "127.0.0.1", Path: path, Port: intstr.FromInt(port), @@ -293,8 +270,8 @@ func componentProbe(port int, path string, scheme api.URIScheme) *api.Probe { } } -func componentPod(container api.Container, volumes ...api.Volume) api.Pod { - return api.Pod{ +func componentPod(container v1.Container, volumes ...v1.Volume) v1.Pod { + return v1.Pod{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Pod", @@ -305,30 +282,15 @@ func componentPod(container api.Container, volumes ...api.Volume) api.Pod { Annotations: map[string]string{kubetypes.CriticalPodAnnotationKey: ""}, Labels: map[string]string{"component": container.Name, "tier": "control-plane"}, }, - Spec: api.PodSpec{ - Containers: []api.Container{container}, + Spec: v1.PodSpec{ + Containers: []v1.Container{container}, HostNetwork: true, Volumes: volumes, }, } } -func getComponentBaseCommand(component string) []string { - if kubeadmapi.GlobalEnvParams.HyperkubeImage != "" { - return []string{"/hyperkube", component} - } - - return []string{"kube-" + component} -} - func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool, k8sVersion *version.Version) []string { - var command []string - - // self-hosted 
apiserver needs to wait on a lock - if selfHosted { - command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/api-server.lock"} - } - defaultArguments := map[string]string{ "insecure-port": "0", "admission-control": defaultv17AdmissionControl, @@ -350,14 +312,11 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool, k "requestheader-extra-headers-prefix": "X-Remote-Extra-", "requestheader-client-ca-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.FrontProxyCACertName), "requestheader-allowed-names": "front-proxy-client", - } - if k8sVersion.AtLeast(kubeadmconstants.MinimumAPIAggregationVersion) { - // add options which allow the kube-apiserver to act as a front-proxy to aggregated API servers - defaultArguments["proxy-client-cert-file"] = filepath.Join(cfg.CertificatesDir, kubeadmconstants.FrontProxyClientCertName) - defaultArguments["proxy-client-key-file"] = filepath.Join(cfg.CertificatesDir, kubeadmconstants.FrontProxyClientKeyName) + "proxy-client-cert-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.FrontProxyClientCertName), + "proxy-client-key-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.FrontProxyClientKeyName), } - command = getComponentBaseCommand(apiServer) + command := []string{"kube-apiserver"} command = append(command, getExtraParameters(cfg.APIServerExtraArgs, defaultArguments)...) command = append(command, getAuthzParameters(cfg.AuthorizationModes)...) @@ -397,28 +356,18 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool, k } func getEtcdCommand(cfg *kubeadmapi.MasterConfiguration) []string { - var command []string - defaultArguments := map[string]string{ "listen-client-urls": "http://127.0.0.1:2379", "advertise-client-urls": "http://127.0.0.1:2379", "data-dir": cfg.Etcd.DataDir, } - command = append(command, "etcd") + command := []string{"etcd"} command = append(command, getExtraParameters(cfg.Etcd.ExtraArgs, defaultArguments)...) 
- return command } func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool, k8sVersion *version.Version) []string { - var command []string - - // self-hosted controller-manager needs to wait on a lock - if selfHosted { - command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/controller-manager.lock"} - } - defaultArguments := map[string]string{ "address": "127.0.0.1", "leader-elect": "true", @@ -430,13 +379,8 @@ func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted "use-service-account-credentials": "true", "controllers": "*,bootstrapsigner,tokencleaner", } - if k8sVersion.LessThan(kubeadmconstants.MinimumCSRSARApproverVersion) { - // enable the former CSR group approver for v1.6 clusters. - // TODO(luxas): Remove this once we're targeting v1.8 at HEAD - defaultArguments["insecure-experimental-approve-all-kubelet-csrs-for-group"] = bootstrapapi.BootstrapGroup - } - command = getComponentBaseCommand(controllerManager) + command := []string{"kube-controller-manager"} command = append(command, getExtraParameters(cfg.ControllerManagerExtraArgs, defaultArguments)...) 
if cfg.CloudProvider != "" { @@ -453,32 +397,23 @@ func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted if cfg.Networking.PodSubnet != "" { command = append(command, "--allocate-node-cidrs=true", "--cluster-cidr="+cfg.Networking.PodSubnet) } - return command } func getSchedulerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool) []string { - var command []string - - // self-hosted apiserver needs to wait on a lock - if selfHosted { - command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/api-server.lock"} - } - defaultArguments := map[string]string{ "address": "127.0.0.1", "leader-elect": "true", "kubeconfig": filepath.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeadmconstants.SchedulerKubeConfigFileName), } - command = getComponentBaseCommand(scheduler) + command := []string{"kube-scheduler"} command = append(command, getExtraParameters(cfg.SchedulerExtraArgs, defaultArguments)...) - return command } -func getProxyEnvVars() []api.EnvVar { - envs := []api.EnvVar{} +func getProxyEnvVars() []v1.EnvVar { + envs := []v1.EnvVar{} for _, env := range os.Environ() { pos := strings.Index(env, "=") if pos == -1 { @@ -488,26 +423,13 @@ func getProxyEnvVars() []api.EnvVar { name := env[:pos] value := env[pos+1:] if strings.HasSuffix(strings.ToLower(name), "_proxy") && value != "" { - envVar := api.EnvVar{Name: name, Value: value} + envVar := v1.EnvVar{Name: name, Value: value} envs = append(envs, envVar) } } return envs } -func getSelfHostedAPIServerEnv() []api.EnvVar { - podIPEnvVar := api.EnvVar{ - Name: "POD_IP", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - FieldPath: "status.podIP", - }, - }, - } - - return append(getProxyEnvVars(), podIPEnvVar) -} - // getAuthzParameters gets the authorization-related parameters to the api server // At this point, we can assume the list of authorization modes is valid (due to that it has been validated in the API machinery code already) // If the 
list is empty; it's defaulted (mostly for unit testing) diff --git a/cmd/kubeadm/app/master/manifests_test.go b/cmd/kubeadm/app/phases/controlplane/manifests_test.go similarity index 91% rename from cmd/kubeadm/app/master/manifests_test.go rename to cmd/kubeadm/app/phases/controlplane/manifests_test.go index 2ff66df5402..8da0566b84a 100644 --- a/cmd/kubeadm/app/master/manifests_test.go +++ b/cmd/kubeadm/app/phases/controlplane/manifests_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package master +package controlplane import ( "fmt" @@ -25,7 +25,7 @@ import ( "sort" "testing" - api "k8s.io/api/core/v1" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" @@ -93,7 +93,7 @@ func TestWriteStaticPodManifests(t *testing.T) { } defer manifest.Close() - var pod api.Pod + var pod v1.Pod d := yaml.NewYAMLOrJSONDecoder(manifest, 4096) if err := d.Decode(&pod); err != nil { t.Error("WriteStaticPodManifests: error decoding manifests/kube-apiserver.yaml into Pod") @@ -131,15 +131,15 @@ func TestNewVolume(t *testing.T) { var tests = []struct { name string path string - expected api.Volume + expected v1.Volume }{ { name: "foo", path: "/etc/foo", - expected: api.Volume{ + expected: v1.Volume{ Name: "foo", - VolumeSource: api.VolumeSource{ - HostPath: &api.HostPathVolumeSource{Path: "/etc/foo"}, + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{Path: "/etc/foo"}, }}, }, } @@ -167,12 +167,12 @@ func TestNewVolumeMount(t *testing.T) { var tests = []struct { name string path string - expected api.VolumeMount + expected v1.VolumeMount }{ { name: "foo", path: "/etc/foo", - expected: api.VolumeMount{ + expected: v1.VolumeMount{ Name: "foo", MountPath: "/etc/foo", }, @@ -201,16 +201,16 @@ func TestNewVolumeMount(t *testing.T) { func TestEtcdVolume(t *testing.T) { var tests = []struct { cfg 
*kubeadmapi.MasterConfiguration - expected api.Volume + expected v1.Volume }{ { cfg: &kubeadmapi.MasterConfiguration{ Etcd: kubeadmapi.Etcd{DataDir: etcdDataDir}, }, - expected: api.Volume{ + expected: v1.Volume{ Name: "etcd", - VolumeSource: api.VolumeSource{ - HostPath: &api.HostPathVolumeSource{Path: etcdDataDir}, + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{Path: etcdDataDir}, }}, }, } @@ -236,10 +236,10 @@ func TestEtcdVolume(t *testing.T) { func TestEtcdVolumeMount(t *testing.T) { var tests = []struct { - expected api.VolumeMount + expected v1.VolumeMount }{ { - expected: api.VolumeMount{ + expected: v1.VolumeMount{ Name: "etcd", MountPath: etcdDataDir, }, @@ -268,14 +268,14 @@ func TestEtcdVolumeMount(t *testing.T) { func TestCertsVolume(t *testing.T) { var tests = []struct { cfg *kubeadmapi.MasterConfiguration - expected api.Volume + expected v1.Volume }{ { cfg: &kubeadmapi.MasterConfiguration{}, - expected: api.Volume{ + expected: v1.Volume{ Name: "certs", - VolumeSource: api.VolumeSource{ - HostPath: &api.HostPathVolumeSource{ + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{ Path: "/etc/ssl/certs"}, }}, }, @@ -302,10 +302,10 @@ func TestCertsVolume(t *testing.T) { func TestCertsVolumeMount(t *testing.T) { var tests = []struct { - expected api.VolumeMount + expected v1.VolumeMount }{ { - expected: api.VolumeMount{ + expected: v1.VolumeMount{ Name: "certs", MountPath: "/etc/ssl/certs", }, @@ -333,13 +333,13 @@ func TestCertsVolumeMount(t *testing.T) { func TestK8sVolume(t *testing.T) { var tests = []struct { - expected api.Volume + expected v1.Volume }{ { - expected: api.Volume{ + expected: v1.Volume{ Name: "k8s", - VolumeSource: api.VolumeSource{ - HostPath: &api.HostPathVolumeSource{ + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{ Path: kubeadmapi.GlobalEnvParams.KubernetesDir}, }}, }, @@ -366,10 +366,10 @@ func TestK8sVolume(t *testing.T) { func TestK8sVolumeMount(t *testing.T) { var 
tests = []struct { - expected api.VolumeMount + expected v1.VolumeMount }{ { - expected: api.VolumeMount{ + expected: v1.VolumeMount{ Name: "k8s", MountPath: kubeadmapi.GlobalEnvParams.KubernetesDir, ReadOnly: true, @@ -416,17 +416,17 @@ func TestComponentProbe(t *testing.T) { var tests = []struct { port int path string - scheme api.URIScheme + scheme v1.URIScheme }{ { port: 1, path: "foo", - scheme: api.URISchemeHTTP, + scheme: v1.URISchemeHTTP, }, { port: 2, path: "bar", - scheme: api.URISchemeHTTPS, + scheme: v1.URISchemeHTTPS, }, } for _, rt := range tests { @@ -465,8 +465,8 @@ func TestComponentPod(t *testing.T) { } for _, rt := range tests { - c := api.Container{Name: rt.n} - v := api.Volume{} + c := v1.Container{Name: rt.n} + v := v1.Volume{} actual := componentPod(c, v) if actual.ObjectMeta.Name != rt.n { t.Errorf( @@ -478,35 +478,6 @@ func TestComponentPod(t *testing.T) { } } -func TestGetComponentBaseCommand(t *testing.T) { - var tests = []struct { - c string - expected []string - }{ - { - c: "foo", - expected: []string{"kube-foo", "--v=2"}, - }, - { - c: "bar", - expected: []string{"kube-bar", "--v=2"}, - }, - } - - for _, rt := range tests { - actual := getComponentBaseCommand(rt.c) - for i := range actual { - if actual[i] != rt.expected[i] { - t.Errorf( - "failed getComponentBaseCommand:\n\texpected: %s\n\t actual: %s", - rt.expected[i], - actual[i], - ) - } - } - } -} - func TestGetAPIServerCommand(t *testing.T) { var tests = []struct { cfg *kubeadmapi.MasterConfiguration @@ -567,9 +538,9 @@ func TestGetAPIServerCommand(t *testing.T) { "--secure-port=123", "--allow-privileged=true", "--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname", + "--experimental-bootstrap-token-auth=true", "--proxy-client-cert-file=/var/lib/certs/front-proxy-client.crt", "--proxy-client-key-file=/var/lib/certs/front-proxy-client.key", - "--experimental-bootstrap-token-auth=true", "--requestheader-username-headers=X-Remote-User", 
"--requestheader-group-headers=X-Remote-Group", "--requestheader-extra-headers-prefix=X-Remote-Extra-", @@ -726,25 +697,6 @@ func TestGetControllerManagerCommand(t *testing.T) { "--controllers=*,bootstrapsigner,tokencleaner", }, }, - { - cfg: &kubeadmapi.MasterConfiguration{ - CertificatesDir: testCertsDir, - KubernetesVersion: "v1.6.4", - }, - expected: []string{ - "kube-controller-manager", - "--address=127.0.0.1", - "--leader-elect=true", - "--kubeconfig=" + kubeadmapi.GlobalEnvParams.KubernetesDir + "/controller-manager.conf", - "--root-ca-file=" + testCertsDir + "/ca.crt", - "--service-account-private-key-file=" + testCertsDir + "/sa.key", - "--cluster-signing-cert-file=" + testCertsDir + "/ca.crt", - "--cluster-signing-key-file=" + testCertsDir + "/ca.key", - "--use-service-account-credentials=true", - "--controllers=*,bootstrapsigner,tokencleaner", - "--insecure-experimental-approve-all-kubelet-csrs-for-group=system:bootstrappers", - }, - }, { cfg: &kubeadmapi.MasterConfiguration{ CloudProvider: "foo", @@ -998,18 +950,3 @@ func TestGetExtraParameters(t *testing.T) { } } } - -func TestVersionCompare(t *testing.T) { - versions := []string{ - "v1.7.0-alpha.1", - "v1.7.0-beta.0", - "v1.7.0-rc.0", - "v1.7.0", - "v1.7.1", - } - for _, v := range versions { - if !version.MustParseSemantic(v).AtLeast(kubeadmconstants.MinimumAPIAggregationVersion) { - t.Errorf("err") - } - } -} diff --git a/cmd/kubeadm/app/phases/kubeconfig/BUILD b/cmd/kubeadm/app/phases/kubeconfig/BUILD index 4e26adcd825..d5038b310ec 100644 --- a/cmd/kubeadm/app/phases/kubeconfig/BUILD +++ b/cmd/kubeadm/app/phases/kubeconfig/BUILD @@ -18,7 +18,6 @@ go_library( "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library", "//cmd/kubeadm/app/util/kubeconfig:go_default_library", - "//pkg/util/node:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", 
"//vendor/k8s.io/client-go/util/cert:go_default_library", diff --git a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go index d842c3eb0b8..e3df1d43083 100644 --- a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go +++ b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go @@ -29,7 +29,6 @@ import ( kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil" kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" - "k8s.io/kubernetes/pkg/util/node" ) // BuildConfigProperties holds some simple information about how this phase should build the KubeConfig object @@ -53,12 +52,7 @@ type BuildConfigProperties struct { // /etc/kubernetes/{admin,kubelet}.conf exist but not certs => certs will be generated and conflict with the kubeconfig files => error // CreateInitKubeConfigFiles is called from the main init and does the work for the default phase behaviour -func CreateInitKubeConfigFiles(masterEndpoint, pkiDir, outDir string) error { - - nodeName := node.GetHostname("") - if len(nodeName) == 0 { - return fmt.Errorf("unable to get hostname for master node") - } +func CreateInitKubeConfigFiles(masterEndpoint, pkiDir, outDir, nodeName string) error { // Create a lightweight specification for what the files should look like filesToCreateFromSpec := map[string]BuildConfigProperties{ diff --git a/cmd/kubeadm/app/phases/selfhosting/BUILD b/cmd/kubeadm/app/phases/selfhosting/BUILD new file mode 100644 index 00000000000..5cd98a4045f --- /dev/null +++ b/cmd/kubeadm/app/phases/selfhosting/BUILD @@ -0,0 +1,61 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "podspec_mutation_test.go", + "selfhosting_test.go", + "selfhosting_volumes_test.go", + ], + library = ":go_default_library", + tags = ["automanaged"], + 
deps = [ + "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + "//cmd/kubeadm/app/constants:go_default_library", + "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "podspec_mutation.go", + "selfhosting.go", + "selfhosting_volumes.go", + ], + tags = ["automanaged"], + deps = [ + "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + "//cmd/kubeadm/app/constants:go_default_library", + "//cmd/kubeadm/app/util:go_default_library", + "//pkg/api:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/cmd/kubeadm/app/phases/selfhosting/podspec_mutation.go b/cmd/kubeadm/app/phases/selfhosting/podspec_mutation.go new file mode 100644 index 00000000000..cf7b2edae7a --- /dev/null +++ b/cmd/kubeadm/app/phases/selfhosting/podspec_mutation.go @@ -0,0 +1,110 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package selfhosting + +import ( + "k8s.io/api/core/v1" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" +) + +// mutatePodSpec makes a Static Pod-hosted PodSpec suitable for self-hosting +func mutatePodSpec(cfg *kubeadmapi.MasterConfiguration, name string, podSpec *v1.PodSpec) { + mutators := map[string][]func(*kubeadmapi.MasterConfiguration, *v1.PodSpec){ + kubeAPIServer: { + addNodeSelectorToPodSpec, + setMasterTolerationOnPodSpec, + setRightDNSPolicyOnPodSpec, + setVolumesOnKubeAPIServerPodSpec, + }, + kubeControllerManager: { + addNodeSelectorToPodSpec, + setMasterTolerationOnPodSpec, + setRightDNSPolicyOnPodSpec, + setVolumesOnKubeControllerManagerPodSpec, + }, + kubeScheduler: { + addNodeSelectorToPodSpec, + setMasterTolerationOnPodSpec, + setRightDNSPolicyOnPodSpec, + setVolumesOnKubeSchedulerPodSpec, + }, + } + + // Get the mutator functions for the component in question, then loop through and execute them + mutatorsForComponent := mutators[name] + for _, mutateFunc := range mutatorsForComponent { + mutateFunc(cfg, podSpec) + } +} + +// addNodeSelectorToPodSpec makes Pod require to be scheduled on a node marked with the master label +func addNodeSelectorToPodSpec(cfg *kubeadmapi.MasterConfiguration, podSpec *v1.PodSpec) { + if podSpec.NodeSelector == nil { + podSpec.NodeSelector = map[string]string{kubeadmconstants.LabelNodeRoleMaster: ""} + return + } + + podSpec.NodeSelector[kubeadmconstants.LabelNodeRoleMaster] = "" +} + +// setMasterTolerationOnPodSpec makes the Pod tolerate the master taint +func setMasterTolerationOnPodSpec(cfg *kubeadmapi.MasterConfiguration, podSpec *v1.PodSpec) { + if podSpec.Tolerations == nil { + podSpec.Tolerations = []v1.Toleration{kubeadmconstants.MasterToleration} + return + } + + podSpec.Tolerations = append(podSpec.Tolerations, 
kubeadmconstants.MasterToleration) +} + +// setRightDNSPolicyOnPodSpec makes sure the self-hosted components can look up things via kube-dns if necessary +func setRightDNSPolicyOnPodSpec(cfg *kubeadmapi.MasterConfiguration, podSpec *v1.PodSpec) { + podSpec.DNSPolicy = v1.DNSClusterFirstWithHostNet +} + +// setVolumesOnKubeAPIServerPodSpec makes sure the self-hosted api server has the required files +func setVolumesOnKubeAPIServerPodSpec(cfg *kubeadmapi.MasterConfiguration, podSpec *v1.PodSpec) { + setK8sVolume(apiServerProjectedVolume, cfg, podSpec) + for _, c := range podSpec.Containers { + c.VolumeMounts = append(c.VolumeMounts, k8sSelfHostedVolumeMount()) + } +} + +// setVolumesOnKubeControllerManagerPodSpec makes sure the self-hosted controller manager has the required files +func setVolumesOnKubeControllerManagerPodSpec(cfg *kubeadmapi.MasterConfiguration, podSpec *v1.PodSpec) { + setK8sVolume(controllerManagerProjectedVolume, cfg, podSpec) + for _, c := range podSpec.Containers { + c.VolumeMounts = append(c.VolumeMounts, k8sSelfHostedVolumeMount()) + } +} + +// setVolumesOnKubeSchedulerPodSpec makes sure the self-hosted scheduler has the required files +func setVolumesOnKubeSchedulerPodSpec(cfg *kubeadmapi.MasterConfiguration, podSpec *v1.PodSpec) { + setK8sVolume(schedulerProjectedVolume, cfg, podSpec) + for _, c := range podSpec.Containers { + c.VolumeMounts = append(c.VolumeMounts, k8sSelfHostedVolumeMount()) + } +} + +func setK8sVolume(cb func(cfg *kubeadmapi.MasterConfiguration) v1.Volume, cfg *kubeadmapi.MasterConfiguration, podSpec *v1.PodSpec) { + for i, v := range podSpec.Volumes { + if v.Name == "k8s" { + podSpec.Volumes[i] = cb(cfg) + } + } +} diff --git a/cmd/kubeadm/app/phases/selfhosting/podspec_mutation_test.go b/cmd/kubeadm/app/phases/selfhosting/podspec_mutation_test.go new file mode 100644 index 00000000000..e84214bf8b8 --- /dev/null +++ b/cmd/kubeadm/app/phases/selfhosting/podspec_mutation_test.go @@ -0,0 +1,190 @@ +/* +Copyright 2017 The 
Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package selfhosting + +import ( + "reflect" + "testing" + + "k8s.io/api/core/v1" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" +) + +func TestMutatePodSpec(t *testing.T) { + var tests = []struct { + component string + podSpec *v1.PodSpec + expected v1.PodSpec + }{ + { + component: kubeAPIServer, + podSpec: &v1.PodSpec{}, + expected: v1.PodSpec{ + NodeSelector: map[string]string{ + kubeadmconstants.LabelNodeRoleMaster: "", + }, + Tolerations: []v1.Toleration{ + kubeadmconstants.MasterToleration, + }, + DNSPolicy: v1.DNSClusterFirstWithHostNet, + }, + }, + { + component: kubeControllerManager, + podSpec: &v1.PodSpec{}, + expected: v1.PodSpec{ + NodeSelector: map[string]string{ + kubeadmconstants.LabelNodeRoleMaster: "", + }, + Tolerations: []v1.Toleration{ + kubeadmconstants.MasterToleration, + }, + DNSPolicy: v1.DNSClusterFirstWithHostNet, + }, + }, + { + component: kubeScheduler, + podSpec: &v1.PodSpec{}, + expected: v1.PodSpec{ + NodeSelector: map[string]string{ + kubeadmconstants.LabelNodeRoleMaster: "", + }, + Tolerations: []v1.Toleration{ + kubeadmconstants.MasterToleration, + }, + DNSPolicy: v1.DNSClusterFirstWithHostNet, + }, + }, + } + + cfg := &kubeadmapi.MasterConfiguration{} + for _, rt := range tests { + mutatePodSpec(cfg, rt.component, rt.podSpec) + + if !reflect.DeepEqual(*rt.podSpec, rt.expected) { + t.Errorf("failed 
mutatePodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec) + } + } +} + +func TestAddNodeSelectorToPodSpec(t *testing.T) { + var tests = []struct { + podSpec *v1.PodSpec + expected v1.PodSpec + }{ + { + podSpec: &v1.PodSpec{}, + expected: v1.PodSpec{ + NodeSelector: map[string]string{ + kubeadmconstants.LabelNodeRoleMaster: "", + }, + }, + }, + { + podSpec: &v1.PodSpec{ + NodeSelector: map[string]string{ + "foo": "bar", + }, + }, + expected: v1.PodSpec{ + NodeSelector: map[string]string{ + "foo": "bar", + kubeadmconstants.LabelNodeRoleMaster: "", + }, + }, + }, + } + + cfg := &kubeadmapi.MasterConfiguration{} + for _, rt := range tests { + addNodeSelectorToPodSpec(cfg, rt.podSpec) + + if !reflect.DeepEqual(*rt.podSpec, rt.expected) { + t.Errorf("failed addNodeSelectorToPodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec) + } + } +} + +func TestSetMasterTolerationOnPodSpec(t *testing.T) { + var tests = []struct { + podSpec *v1.PodSpec + expected v1.PodSpec + }{ + { + podSpec: &v1.PodSpec{}, + expected: v1.PodSpec{ + Tolerations: []v1.Toleration{ + kubeadmconstants.MasterToleration, + }, + }, + }, + { + podSpec: &v1.PodSpec{ + Tolerations: []v1.Toleration{ + {Key: "foo", Value: "bar"}, + }, + }, + expected: v1.PodSpec{ + Tolerations: []v1.Toleration{ + {Key: "foo", Value: "bar"}, + kubeadmconstants.MasterToleration, + }, + }, + }, + } + + cfg := &kubeadmapi.MasterConfiguration{} + for _, rt := range tests { + setMasterTolerationOnPodSpec(cfg, rt.podSpec) + + if !reflect.DeepEqual(*rt.podSpec, rt.expected) { + t.Errorf("failed setMasterTolerationOnPodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec) + } + } +} + +func TestSetRightDNSPolicyOnPodSpec(t *testing.T) { + var tests = []struct { + podSpec *v1.PodSpec + expected v1.PodSpec + }{ + { + podSpec: &v1.PodSpec{}, + expected: v1.PodSpec{ + DNSPolicy: v1.DNSClusterFirstWithHostNet, + }, + }, + { + podSpec: &v1.PodSpec{ + DNSPolicy: v1.DNSClusterFirst, + }, + expected: v1.PodSpec{ + 
DNSPolicy: v1.DNSClusterFirstWithHostNet, + }, + }, + } + + cfg := &kubeadmapi.MasterConfiguration{} + for _, rt := range tests { + setRightDNSPolicyOnPodSpec(cfg, rt.podSpec) + + if !reflect.DeepEqual(*rt.podSpec, rt.expected) { + t.Errorf("failed setRightDNSPolicyOnPodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec) + } + } +} diff --git a/cmd/kubeadm/app/phases/selfhosting/selfhosting.go b/cmd/kubeadm/app/phases/selfhosting/selfhosting.go new file mode 100644 index 00000000000..421ede33cf6 --- /dev/null +++ b/cmd/kubeadm/app/phases/selfhosting/selfhosting.go @@ -0,0 +1,167 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package selfhosting + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "time" + + "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kuberuntime "k8s.io/apimachinery/pkg/runtime" + clientset "k8s.io/client-go/kubernetes" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" + kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" + "k8s.io/kubernetes/pkg/api" +) + +const ( + kubeAPIServer = "kube-apiserver" + kubeControllerManager = "kube-controller-manager" + kubeScheduler = "kube-scheduler" + + selfHostingPrefix = "self-hosted-" +) + +// CreateSelfHostedControlPlane is responsible for turning a Static Pod-hosted control plane to a self-hosted one +// It achieves that task this way: +// 1. Load the Static Pod specification from disk (from /etc/kubernetes/manifests) +// 2. Extract the PodSpec from that Static Pod specification +// 3. Mutate the PodSpec to be compatible with self-hosting (add the right labels, taints, etc. so it can schedule correctly) +// 4. Build a new DaemonSet object for the self-hosted component in question. Use the above mentioned PodSpec +// 5. Create the DaemonSet resource. Wait until the Pods are running. +// 6. Remove the Static Pod manifest file. The kubelet will stop the original Static Pod-hosted component that was running. +// 7. The self-hosted containers should now step up and take over. +// 8. In order to avoid race conditions, we're still making sure the API /healthz endpoint is healthy +// 9. 
Do that for the kube-apiserver, kube-controller-manager and kube-scheduler in a loop +func CreateSelfHostedControlPlane(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset) error { + + if err := createTLSSecrets(cfg, client); err != nil { + return err + } + + if err := createOpaqueSecrets(cfg, client); err != nil { + return err + } + + // The sequence here isn't set in stone, but seems to work well to start with the API server + components := []string{kubeAPIServer, kubeControllerManager, kubeScheduler} + + for _, componentName := range components { + start := time.Now() + manifestPath := buildStaticManifestFilepath(componentName) + + // Load the Static Pod file in order to be able to create a self-hosted variant of that file + podSpec, err := loadPodSpecFromFile(manifestPath) + if err != nil { + return err + } + + // Build a DaemonSet object from the loaded PodSpec + ds := buildDaemonSet(cfg, componentName, podSpec) + + // Create the DaemonSet in the API Server + if _, err := client.ExtensionsV1beta1().DaemonSets(metav1.NamespaceSystem).Create(ds); err != nil { + if !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("failed to create self-hosted %q daemonset [%v]", componentName, err) + } + + if _, err := client.ExtensionsV1beta1().DaemonSets(metav1.NamespaceSystem).Update(ds); err != nil { + // TODO: We should retry on 409 responses + return fmt.Errorf("failed to update self-hosted %q daemonset [%v]", componentName, err) + } + } + + // Wait for the self-hosted component to come up + kubeadmutil.WaitForPodsWithLabel(client, buildSelfHostedWorkloadLabelQuery(componentName)) + + // Remove the old Static Pod manifest + if err := os.RemoveAll(manifestPath); err != nil { + return fmt.Errorf("unable to delete static pod manifest for %s [%v]", componentName, err) + } + + // Make sure the API is responsive at /healthz + kubeadmutil.WaitForAPI(client) + + fmt.Printf("[self-hosted] self-hosted %s ready after %f seconds\n", componentName, 
time.Since(start).Seconds()) + } + return nil +} + +// buildDaemonSet is responsible for mutating the PodSpec and returning a DaemonSet which is suitable for the self-hosting purpose +func buildDaemonSet(cfg *kubeadmapi.MasterConfiguration, name string, podSpec *v1.PodSpec) *extensions.DaemonSet { + // Mutate the PodSpec so it's suitable for self-hosting + mutatePodSpec(cfg, name, podSpec) + + // Return a DaemonSet based on that Spec + return &extensions.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: addSelfHostedPrefix(name), + Namespace: metav1.NamespaceSystem, + Labels: map[string]string{ + "k8s-app": addSelfHostedPrefix(name), + }, + }, + Spec: extensions.DaemonSetSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "k8s-app": addSelfHostedPrefix(name), + }, + }, + Spec: *podSpec, + }, + }, + } +} + +// loadPodSpecFromFile reads and decodes a file containing a specification of a Pod +// TODO: Consider using "k8s.io/kubernetes/pkg/volume/util".LoadPodFromFile(filename string) in the future instead. 
+func loadPodSpecFromFile(manifestPath string) (*v1.PodSpec, error) { + podBytes, err := ioutil.ReadFile(manifestPath) + if err != nil { + return nil, err + } + + staticPod := &v1.Pod{} + if err := kuberuntime.DecodeInto(api.Codecs.UniversalDecoder(), podBytes, staticPod); err != nil { + return nil, fmt.Errorf("unable to decode static pod %v", err) + } + + return &staticPod.Spec, nil +} + +// buildStaticManifestFilepath returns the location on the disk where the Static Pod should be present +func buildStaticManifestFilepath(componentName string) string { + return filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ManifestsSubDirName, componentName+".yaml") +} + +// buildSelfHostedWorkloadLabelQuery creates the right query for matching a self-hosted Pod +func buildSelfHostedWorkloadLabelQuery(componentName string) string { + return fmt.Sprintf("k8s-app=%s", addSelfHostedPrefix(componentName)) +} + +// addSelfHostedPrefix adds the self-hosted- prefix to the component name +func addSelfHostedPrefix(componentName string) string { + return fmt.Sprintf("%s%s", selfHostingPrefix, componentName) +} diff --git a/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go b/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go new file mode 100644 index 00000000000..558276b5c29 --- /dev/null +++ b/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go @@ -0,0 +1,658 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package selfhosting + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/ghodss/yaml" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" +) + +const ( + testAPIServerPod = ` +apiVersion: v1 +kind: Pod +metadata: + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + creationTimestamp: null + labels: + component: kube-apiserver + tier: control-plane + name: kube-apiserver + namespace: kube-system +spec: + containers: + - command: + - kube-apiserver + - --client-ca-file=/etc/kubernetes/pki/ca.crt + - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key + - --allow-privileged=true + - --service-cluster-ip-range=10.96.0.0/12 + - --service-account-key-file=/etc/kubernetes/pki/sa.pub + - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt + - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt + - --secure-port=6443 + - --insecure-port=0 + - --admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota + - --requestheader-extra-headers-prefix=X-Remote-Extra- + - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt + - --experimental-bootstrap-token-auth=true + - --requestheader-group-headers=X-Remote-Group + - --requestheader-allowed-names=front-proxy-client + - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key + - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt + - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --requestheader-username-headers=X-Remote-User + - --authorization-mode=Node,RBAC + - --advertise-address=192.168.200.101 + - --etcd-servers=http://127.0.0.1:2379 + image: gcr.io/google_containers/kube-apiserver-amd64:v1.7.0 + livenessProbe: + failureThreshold: 8 + httpGet: + host: 127.0.0.1 + path: /healthz + port: 6443 + 
scheme: HTTPS + initialDelaySeconds: 15 + timeoutSeconds: 15 + name: kube-apiserver + resources: + requests: + cpu: 250m + volumeMounts: + - mountPath: /etc/kubernetes + name: k8s + readOnly: true + - mountPath: /etc/ssl/certs + name: certs + - mountPath: /etc/pki + name: pki + hostNetwork: true + volumes: + - name: k8s + projected: + sources: + - secret: + items: + - key: tls.crt + path: ca.crt + - key: tls.key + path: ca.key + name: ca + - secret: + items: + - key: tls.crt + path: apiserver.crt + - key: tls.key + path: apiserver.key + name: apiserver + - secret: + items: + - key: tls.crt + path: apiserver-kubelet-client.crt + - key: tls.key + path: apiserver-kubelet-client.key + name: apiserver-kubelet-client + - secret: + items: + - key: tls.crt + path: sa.pub + - key: tls.key + path: sa.key + name: sa + - secret: + items: + - key: tls.crt + path: front-proxy-ca.crt + name: front-proxy-ca + - secret: + items: + - key: tls.crt + path: front-proxy-client.crt + - key: tls.key + path: front-proxy-client.key + name: front-proxy-client + - hostPath: + path: /etc/ssl/certs + name: certs + - hostPath: + path: /etc/pki + name: pki +status: {} +` + + testAPIServerDaemonSet = `metadata: + creationTimestamp: null + labels: + k8s-app: self-hosted-kube-apiserver + name: self-hosted-kube-apiserver + namespace: kube-system +spec: + template: + metadata: + creationTimestamp: null + labels: + k8s-app: self-hosted-kube-apiserver + spec: + containers: + - command: + - kube-apiserver + - --client-ca-file=/etc/kubernetes/pki/ca.crt + - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key + - --allow-privileged=true + - --service-cluster-ip-range=10.96.0.0/12 + - --service-account-key-file=/etc/kubernetes/pki/sa.pub + - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt + - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt + - --secure-port=6443 + - --insecure-port=0 + - 
--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota + - --requestheader-extra-headers-prefix=X-Remote-Extra- + - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt + - --experimental-bootstrap-token-auth=true + - --requestheader-group-headers=X-Remote-Group + - --requestheader-allowed-names=front-proxy-client + - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key + - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt + - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --requestheader-username-headers=X-Remote-User + - --authorization-mode=Node,RBAC + - --advertise-address=192.168.200.101 + - --etcd-servers=http://127.0.0.1:2379 + image: gcr.io/google_containers/kube-apiserver-amd64:v1.7.0 + livenessProbe: + failureThreshold: 8 + httpGet: + host: 127.0.0.1 + path: /healthz + port: 6443 + scheme: HTTPS + initialDelaySeconds: 15 + timeoutSeconds: 15 + name: kube-apiserver + resources: + requests: + cpu: 250m + volumeMounts: + - mountPath: /etc/kubernetes + name: k8s + readOnly: true + - mountPath: /etc/ssl/certs + name: certs + - mountPath: /etc/pki + name: pki + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + nodeSelector: + node-role.kubernetes.io/master: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + volumes: + - name: k8s + projected: + sources: + - secret: + items: + - key: tls.crt + path: ca.crt + - key: tls.key + path: ca.key + name: ca + - secret: + items: + - key: tls.crt + path: apiserver.crt + - key: tls.key + path: apiserver.key + name: apiserver + - secret: + items: + - key: tls.crt + path: apiserver-kubelet-client.crt + - key: tls.key + path: apiserver-kubelet-client.key + name: apiserver-kubelet-client + - secret: + items: + - key: tls.crt + 
path: sa.pub + - key: tls.key + path: sa.key + name: sa + - secret: + items: + - key: tls.crt + path: front-proxy-ca.crt + name: front-proxy-ca + - secret: + items: + - key: tls.crt + path: front-proxy-client.crt + - key: tls.key + path: front-proxy-client.key + name: front-proxy-client + - hostPath: + path: /etc/ssl/certs + name: certs + - hostPath: + path: /etc/pki + name: pki + updateStrategy: {} +status: + currentNumberScheduled: 0 + desiredNumberScheduled: 0 + numberMisscheduled: 0 + numberReady: 0 +` + + testControllerManagerPod = ` +apiVersion: v1 +kind: Pod +metadata: + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + creationTimestamp: null + labels: + component: kube-controller-manager + tier: control-plane + name: kube-controller-manager + namespace: kube-system +spec: + containers: + - command: + - kube-controller-manager + - --service-account-private-key-file=/etc/kubernetes/pki/sa.key + - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt + - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key + - --leader-elect=true + - --kubeconfig=/etc/kubernetes/controller-manager.conf + - --controllers=*,bootstrapsigner,tokencleaner + - --root-ca-file=/etc/kubernetes/pki/ca.crt + - --address=127.0.0.1 + - --use-service-account-credentials=true + image: gcr.io/google_containers/kube-controller-manager-amd64:v1.7.0 + livenessProbe: + failureThreshold: 8 + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10252 + scheme: HTTP + initialDelaySeconds: 15 + timeoutSeconds: 15 + name: kube-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/kubernetes + name: k8s + readOnly: true + - mountPath: /etc/ssl/certs + name: certs + - mountPath: /etc/pki + name: pki + hostNetwork: true + volumes: + - name: k8s + projected: + sources: + - secret: + name: controller-manager.conf + - secret: + items: + - key: tls.crt + path: ca.crt + - key: tls.key + path: ca.key + name: ca + - secret: + items: + - key: tls.key + path: 
sa.key + name: sa + - hostPath: + path: /etc/ssl/certs + name: certs + - hostPath: + path: /etc/pki + name: pki +status: {} +` + + testControllerManagerDaemonSet = `metadata: + creationTimestamp: null + labels: + k8s-app: self-hosted-kube-controller-manager + name: self-hosted-kube-controller-manager + namespace: kube-system +spec: + template: + metadata: + creationTimestamp: null + labels: + k8s-app: self-hosted-kube-controller-manager + spec: + containers: + - command: + - kube-controller-manager + - --service-account-private-key-file=/etc/kubernetes/pki/sa.key + - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt + - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key + - --leader-elect=true + - --kubeconfig=/etc/kubernetes/controller-manager.conf + - --controllers=*,bootstrapsigner,tokencleaner + - --root-ca-file=/etc/kubernetes/pki/ca.crt + - --address=127.0.0.1 + - --use-service-account-credentials=true + image: gcr.io/google_containers/kube-controller-manager-amd64:v1.7.0 + livenessProbe: + failureThreshold: 8 + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10252 + scheme: HTTP + initialDelaySeconds: 15 + timeoutSeconds: 15 + name: kube-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/kubernetes + name: k8s + readOnly: true + - mountPath: /etc/ssl/certs + name: certs + - mountPath: /etc/pki + name: pki + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + nodeSelector: + node-role.kubernetes.io/master: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + volumes: + - name: k8s + projected: + sources: + - secret: + name: controller-manager.conf + - secret: + items: + - key: tls.crt + path: ca.crt + - key: tls.key + path: ca.key + name: ca + - secret: + items: + - key: tls.key + path: sa.key + name: sa + - hostPath: + path: /etc/ssl/certs + name: certs + - hostPath: + path: /etc/pki + name: pki + updateStrategy: {} +status: + currentNumberScheduled: 0 + 
desiredNumberScheduled: 0 + numberMisscheduled: 0 + numberReady: 0 +` + + testSchedulerPod = ` +apiVersion: v1 +kind: Pod +metadata: + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + creationTimestamp: null + labels: + component: kube-scheduler + tier: control-plane + name: kube-scheduler + namespace: kube-system +spec: + containers: + - command: + - kube-scheduler + - --address=127.0.0.1 + - --leader-elect=true + - --kubeconfig=/etc/kubernetes/scheduler.conf + image: gcr.io/google_containers/kube-scheduler-amd64:v1.7.0 + livenessProbe: + failureThreshold: 8 + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10251 + scheme: HTTP + initialDelaySeconds: 15 + timeoutSeconds: 15 + name: kube-scheduler + resources: + requests: + cpu: 100m + volumeMounts: + - mountPath: /etc/kubernetes + name: k8s + readOnly: true + hostNetwork: true + volumes: + - name: k8s + projected: + sources: + - secret: + name: scheduler.conf +status: {} +` + + testSchedulerDaemonSet = `metadata: + creationTimestamp: null + labels: + k8s-app: self-hosted-kube-scheduler + name: self-hosted-kube-scheduler + namespace: kube-system +spec: + template: + metadata: + creationTimestamp: null + labels: + k8s-app: self-hosted-kube-scheduler + spec: + containers: + - command: + - kube-scheduler + - --address=127.0.0.1 + - --leader-elect=true + - --kubeconfig=/etc/kubernetes/scheduler.conf + image: gcr.io/google_containers/kube-scheduler-amd64:v1.7.0 + livenessProbe: + failureThreshold: 8 + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10251 + scheme: HTTP + initialDelaySeconds: 15 + timeoutSeconds: 15 + name: kube-scheduler + resources: + requests: + cpu: 100m + volumeMounts: + - mountPath: /etc/kubernetes + name: k8s + readOnly: true + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + nodeSelector: + node-role.kubernetes.io/master: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + volumes: + - name: k8s + projected: + sources: + - secret: + 
name: scheduler.conf + updateStrategy: {} +status: + currentNumberScheduled: 0 + desiredNumberScheduled: 0 + numberMisscheduled: 0 + numberReady: 0 +` +) + +func TestBuildDaemonSet(t *testing.T) { + var tests = []struct { + component string + podBytes []byte + dsBytes []byte + }{ + { + component: kubeAPIServer, + podBytes: []byte(testAPIServerPod), + dsBytes: []byte(testAPIServerDaemonSet), + }, + { + component: kubeControllerManager, + podBytes: []byte(testControllerManagerPod), + dsBytes: []byte(testControllerManagerDaemonSet), + }, + { + component: kubeScheduler, + podBytes: []byte(testSchedulerPod), + dsBytes: []byte(testSchedulerDaemonSet), + }, + } + + for _, rt := range tests { + tempFile, err := createTempFileWithContent(rt.podBytes) + defer os.Remove(tempFile) + + podSpec, err := loadPodSpecFromFile(tempFile) + if err != nil { + t.Fatalf("couldn't load the specified Pod") + } + + cfg := &kubeadmapi.MasterConfiguration{} + ds := buildDaemonSet(cfg, rt.component, podSpec) + dsBytes, err := yaml.Marshal(ds) + if err != nil { + t.Fatalf("failed to marshal daemonset to YAML: %v", err) + } + + if !bytes.Equal(dsBytes, rt.dsBytes) { + t.Errorf("failed TestBuildDaemonSet:\nexpected:\n%s\nsaw:\n%s", rt.dsBytes, dsBytes) + } + } +} + +func TestLoadPodSpecFromFile(t *testing.T) { + tests := []struct { + content string + expectError bool + }{ + { + // Good YAML + content: ` +apiVersion: v1 +kind: Pod +metadata: + name: testpod +spec: + containers: + - image: gcr.io/google_containers/busybox +`, + expectError: false, + }, + { + // Good JSON + content: ` +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "testpod" + }, + "spec": { + "containers": [ + { + "image": "gcr.io/google_containers/busybox" + } + ] + } +}`, + expectError: false, + }, + { + // Bad PodSpec + content: ` +apiVersion: v1 +kind: Pod +metadata: + name: testpod +spec: + - image: gcr.io/google_containers/busybox +`, + expectError: true, + }, + } + + for _, rt := range tests { + tempFile, 
err := createTempFileWithContent([]byte(rt.content)) + defer os.Remove(tempFile) + + _, err = loadPodSpecFromFile(tempFile) + if (err != nil) != rt.expectError { + t.Errorf("failed TestLoadPodSpecFromFile:\nexpected error:\n%t\nsaw:\n%v", rt.expectError, err) + } + } +} + +func createTempFileWithContent(content []byte) (string, error) { + tempFile, err := ioutil.TempFile("", "") + if err != nil { + return "", fmt.Errorf("cannot create temporary file: %v", err) + } + if _, err = tempFile.Write([]byte(content)); err != nil { + return "", fmt.Errorf("cannot save temporary file: %v", err) + } + if err = tempFile.Close(); err != nil { + return "", fmt.Errorf("cannot close temporary file: %v", err) + } + return tempFile.Name(), nil +} diff --git a/cmd/kubeadm/app/phases/selfhosting/selfhosting_volumes.go b/cmd/kubeadm/app/phases/selfhosting/selfhosting_volumes.go new file mode 100644 index 00000000000..1e9a9336092 --- /dev/null +++ b/cmd/kubeadm/app/phases/selfhosting/selfhosting_volumes.go @@ -0,0 +1,340 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package selfhosting + +import ( + "fmt" + "io/ioutil" + "path" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" +) + +type tlsKeyPair struct { + name string + cert string + key string +} + +func k8sSelfHostedVolumeMount() v1.VolumeMount { + return v1.VolumeMount{ + Name: "k8s", + MountPath: kubeadmapi.GlobalEnvParams.KubernetesDir, + ReadOnly: true, + } +} + +func apiServerProjectedVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume { + return v1.Volume{ + Name: "k8s", + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: kubeadmconstants.CACertAndKeyBaseName, + }, + Items: []v1.KeyToPath{ + { + Key: v1.TLSCertKey, + Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.CACertName), + }, + { + Key: v1.TLSPrivateKeyKey, + Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.CAKeyName), + }, + }, + }, + }, + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: kubeadmconstants.APIServerCertAndKeyBaseName, + }, + Items: []v1.KeyToPath{ + { + Key: v1.TLSCertKey, + Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.APIServerCertName), + }, + { + Key: v1.TLSPrivateKeyKey, + Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.APIServerKeyName), + }, + }, + }, + }, + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName, + }, + Items: []v1.KeyToPath{ + { + Key: v1.TLSCertKey, + Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.APIServerKubeletClientCertName), + }, + { + Key: v1.TLSPrivateKeyKey, + Path: 
path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.APIServerKubeletClientKeyName), + }, + }, + }, + }, + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: kubeadmconstants.ServiceAccountKeyBaseName, + }, + Items: []v1.KeyToPath{ + { + Key: v1.TLSCertKey, + Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.ServiceAccountPublicKeyName), + }, + { + Key: v1.TLSPrivateKeyKey, + Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.ServiceAccountPrivateKeyName), + }, + }, + }, + }, + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: kubeadmconstants.FrontProxyCACertAndKeyBaseName, + }, + Items: []v1.KeyToPath{ + { + Key: v1.TLSCertKey, + Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.FrontProxyCACertName), + }, + }, + }, + }, + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: kubeadmconstants.FrontProxyClientCertAndKeyBaseName, + }, + Items: []v1.KeyToPath{ + { + Key: v1.TLSCertKey, + Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.FrontProxyClientCertName), + }, + { + Key: v1.TLSPrivateKeyKey, + Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.FrontProxyClientKeyName), + }, + }, + }, + }, + }, + }, + }, + } +} + +func schedulerProjectedVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume { + return v1.Volume{ + Name: "k8s", + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: kubeadmconstants.SchedulerKubeConfigFileName, + }, + }, + }, + }, + }, + }, + } +} + +func controllerManagerProjectedVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume { + return v1.Volume{ + Name: "k8s", + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + Secret: &v1.SecretProjection{ 
+ LocalObjectReference: v1.LocalObjectReference{ + Name: kubeadmconstants.ControllerManagerKubeConfigFileName, + }, + }, + }, + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: kubeadmconstants.CACertAndKeyBaseName, + }, + Items: []v1.KeyToPath{ + { + Key: v1.TLSCertKey, + Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.CACertName), + }, + { + Key: v1.TLSPrivateKeyKey, + Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.CAKeyName), + }, + }, + }, + }, + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: kubeadmconstants.ServiceAccountKeyBaseName, + }, + Items: []v1.KeyToPath{ + { + Key: v1.TLSPrivateKeyKey, + Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.ServiceAccountPrivateKeyName), + }, + }, + }, + }, + }, + }, + }, + } +} + +func createTLSSecrets(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset) error { + for _, tlsKeyPair := range getTLSKeyPairs() { + secret, err := createTLSSecretFromFiles( + tlsKeyPair.name, + path.Join(cfg.CertificatesDir, tlsKeyPair.cert), + path.Join(cfg.CertificatesDir, tlsKeyPair.key), + ) + if err != nil { + return err + } + + if _, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret); err != nil { + return err + } + fmt.Printf("[self-hosted] Created TLS secret %q from %s and %s\n", tlsKeyPair.name, tlsKeyPair.cert, tlsKeyPair.key) + } + + return nil +} + +func createOpaqueSecrets(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset) error { + files := []string{ + kubeadmconstants.SchedulerKubeConfigFileName, + kubeadmconstants.ControllerManagerKubeConfigFileName, + } + for _, file := range files { + secret, err := createOpaqueSecretFromFile( + file, + path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, file), + ) + if err != nil { + return err + } + + if _, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret); err != nil { + return err + } + 
fmt.Printf("[self-hosted] Created secret %q\n", file) + } + + return nil +} + +func createTLSSecretFromFiles(secretName, crt, key string) (*v1.Secret, error) { + crtBytes, err := ioutil.ReadFile(crt) + if err != nil { + return nil, err + } + keyBytes, err := ioutil.ReadFile(key) + if err != nil { + return nil, err + } + + return &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: metav1.NamespaceSystem, + }, + Type: v1.SecretTypeTLS, + Data: map[string][]byte{ + v1.TLSCertKey: crtBytes, + v1.TLSPrivateKeyKey: keyBytes, + }, + }, nil +} + +func createOpaqueSecretFromFile(secretName, file string) (*v1.Secret, error) { + fileBytes, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + + return &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: metav1.NamespaceSystem, + }, + Type: v1.SecretTypeOpaque, + Data: map[string][]byte{ + path.Base(file): fileBytes, + }, + }, nil +} + +func getTLSKeyPairs() []*tlsKeyPair { + return []*tlsKeyPair{ + { + name: kubeadmconstants.CACertAndKeyBaseName, + cert: kubeadmconstants.CACertName, + key: kubeadmconstants.CAKeyName, + }, + { + name: kubeadmconstants.APIServerCertAndKeyBaseName, + cert: kubeadmconstants.APIServerCertName, + key: kubeadmconstants.APIServerKeyName, + }, + { + name: kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName, + cert: kubeadmconstants.APIServerKubeletClientCertName, + key: kubeadmconstants.APIServerKubeletClientKeyName, + }, + { + name: kubeadmconstants.ServiceAccountKeyBaseName, + cert: kubeadmconstants.ServiceAccountPublicKeyName, + key: kubeadmconstants.ServiceAccountPrivateKeyName, + }, + { + name: kubeadmconstants.FrontProxyCACertAndKeyBaseName, + cert: kubeadmconstants.FrontProxyCACertName, + key: kubeadmconstants.FrontProxyCAKeyName, + }, + { + name: kubeadmconstants.FrontProxyClientCertAndKeyBaseName, + cert: kubeadmconstants.FrontProxyClientCertName, + key: kubeadmconstants.FrontProxyClientKeyName, + }, + } +} diff --git 
a/cmd/kubeadm/app/phases/selfhosting/selfhosting_volumes_test.go b/cmd/kubeadm/app/phases/selfhosting/selfhosting_volumes_test.go new file mode 100644 index 00000000000..bf2e51c1f16 --- /dev/null +++ b/cmd/kubeadm/app/phases/selfhosting/selfhosting_volumes_test.go @@ -0,0 +1,72 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package selfhosting + +import ( + "io/ioutil" + "log" + "os" + "testing" +) + +func createTemporaryFile(name string) *os.File { + content := []byte("foo") + tmpfile, err := ioutil.TempFile("", name) + if err != nil { + log.Fatal(err) + } + + if _, err := tmpfile.Write(content); err != nil { + log.Fatal(err) + } + + return tmpfile +} + +func TestCreateTLSSecretFromFile(t *testing.T) { + tmpCert := createTemporaryFile("foo.crt") + defer os.Remove(tmpCert.Name()) + tmpKey := createTemporaryFile("foo.key") + defer os.Remove(tmpKey.Name()) + + _, err := createTLSSecretFromFiles("foo", tmpCert.Name(), tmpKey.Name()) + if err != nil { + log.Fatal(err) + } + + if err := tmpCert.Close(); err != nil { + log.Fatal(err) + } + + if err := tmpKey.Close(); err != nil { + log.Fatal(err) + } +} + +func TestCreateOpaqueSecretFromFile(t *testing.T) { + tmpFile := createTemporaryFile("foo") + defer os.Remove(tmpFile.Name()) + + _, err := createOpaqueSecretFromFile("foo", tmpFile.Name()) + if err != nil { + log.Fatal(err) + } + + if err := tmpFile.Close(); err != nil { + log.Fatal(err) + } +} diff --git 
a/cmd/kubeadm/app/preflight/BUILD b/cmd/kubeadm/app/preflight/BUILD index c3fcafba251..99c53133b98 100644 --- a/cmd/kubeadm/app/preflight/BUILD +++ b/cmd/kubeadm/app/preflight/BUILD @@ -13,15 +13,18 @@ go_library( srcs = ["checks.go"], tags = ["automanaged"], deps = [ + "//cmd/kube-apiserver/app/options:go_default_library", + "//cmd/kube-controller-manager/app/options:go_default_library", "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", "//pkg/api/validation:go_default_library", "//pkg/kubeapiserver/authorizer/modes:go_default_library", "//pkg/util/initsystem:go_default_library", - "//pkg/util/node:go_default_library", + "//plugin/cmd/kube-scheduler/app/options:go_default_library", "//test/e2e_node/system:go_default_library", "//vendor/github.com/PuerkitoBio/purell:go_default_library", "//vendor/github.com/blang/semver:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", ], ) diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index d55050ffd14..0d5b080170c 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -35,15 +35,18 @@ import ( "github.com/PuerkitoBio/purell" "github.com/blang/semver" + "github.com/spf13/pflag" "net/url" + apiservoptions "k8s.io/kubernetes/cmd/kube-apiserver/app/options" + cmoptions "k8s.io/kubernetes/cmd/kube-controller-manager/app/options" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/pkg/api/validation" authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes" "k8s.io/kubernetes/pkg/util/initsystem" - "k8s.io/kubernetes/pkg/util/node" + schoptions "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options" "k8s.io/kubernetes/test/e2e_node/system" ) @@ -267,21 +270,22 @@ func (ipc InPathCheck) Check() (warnings, errors []error) { // HostnameCheck checks if hostname match dns sub domain 
regex. // If hostname doesn't match this regex, kubelet will not launch static pods like kube-apiserver/kube-controller-manager and so on. -type HostnameCheck struct{} +type HostnameCheck struct { + nodeName string +} func (hc HostnameCheck) Check() (warnings, errors []error) { errors = []error{} warnings = []error{} - hostname := node.GetHostname("") - for _, msg := range validation.ValidateNodeName(hostname, false) { - errors = append(errors, fmt.Errorf("hostname \"%s\" %s", hostname, msg)) + for _, msg := range validation.ValidateNodeName(hc.nodeName, false) { + errors = append(errors, fmt.Errorf("hostname \"%s\" %s", hc.nodeName, msg)) } - addr, err := net.LookupHost(hostname) + addr, err := net.LookupHost(hc.nodeName) if addr == nil { - warnings = append(warnings, fmt.Errorf("hostname \"%s\" could not be reached", hostname)) + warnings = append(warnings, fmt.Errorf("hostname \"%s\" could not be reached", hc.nodeName)) } if err != nil { - warnings = append(warnings, fmt.Errorf("hostname \"%s\" %s", hostname, err)) + warnings = append(warnings, fmt.Errorf("hostname \"%s\" %s", hc.nodeName, err)) } return warnings, errors } @@ -313,6 +317,46 @@ func (hst HTTPProxyCheck) Check() (warnings, errors []error) { return nil, nil } +// ExtraArgsCheck checks if arguments are valid. 
+type ExtraArgsCheck struct { + APIServerExtraArgs map[string]string + ControllerManagerExtraArgs map[string]string + SchedulerExtraArgs map[string]string +} + +func (eac ExtraArgsCheck) Check() (warnings, errors []error) { + argsCheck := func(name string, args map[string]string, f *pflag.FlagSet) []error { + errs := []error{} + for k, v := range args { + if err := f.Set(k, v); err != nil { + errs = append(errs, fmt.Errorf("%s: failed to parse extra argument --%s=%s", name, k, v)) + } + } + return errs + } + + warnings = []error{} + if len(eac.APIServerExtraArgs) > 0 { + flags := pflag.NewFlagSet("", pflag.ContinueOnError) + s := apiservoptions.NewServerRunOptions() + s.AddFlags(flags) + warnings = append(warnings, argsCheck("kube-apiserver", eac.APIServerExtraArgs, flags)...) + } + if len(eac.ControllerManagerExtraArgs) > 0 { + flags := pflag.NewFlagSet("", pflag.ContinueOnError) + s := cmoptions.NewCMServer() + s.AddFlags(flags, []string{}, []string{}) + warnings = append(warnings, argsCheck("kube-controller-manager", eac.ControllerManagerExtraArgs, flags)...) + } + if len(eac.SchedulerExtraArgs) > 0 { + flags := pflag.NewFlagSet("", pflag.ContinueOnError) + s := schoptions.NewSchedulerServer() + s.AddFlags(flags) + warnings = append(warnings, argsCheck("kube-scheduler", eac.SchedulerExtraArgs, flags)...) 
+ } + return warnings, nil +} + type SystemVerificationCheck struct{} func (sysver SystemVerificationCheck) Check() (warnings, errors []error) { @@ -493,7 +537,7 @@ func RunInitMasterChecks(cfg *kubeadmapi.MasterConfiguration) error { checks := []Checker{ SystemVerificationCheck{}, IsRootCheck{}, - HostnameCheck{}, + HostnameCheck{nodeName: cfg.NodeName}, ServiceCheck{Service: "kubelet", CheckIfActive: false}, ServiceCheck{Service: "docker", CheckIfActive: true}, FirewalldCheck{ports: []int{int(cfg.API.BindPort), 10250}}, @@ -514,6 +558,11 @@ func RunInitMasterChecks(cfg *kubeadmapi.MasterConfiguration) error { InPathCheck{executable: "socat", mandatory: false}, InPathCheck{executable: "tc", mandatory: false}, InPathCheck{executable: "touch", mandatory: false}, + ExtraArgsCheck{ + APIServerExtraArgs: cfg.APIServerExtraArgs, + ControllerManagerExtraArgs: cfg.ControllerManagerExtraArgs, + SchedulerExtraArgs: cfg.SchedulerExtraArgs, + }, } if len(cfg.Etcd.Endpoints) == 0 { diff --git a/cmd/kubeadm/app/preflight/checks_test.go b/cmd/kubeadm/app/preflight/checks_test.go index f575b8d8238..f790f354291 100644 --- a/cmd/kubeadm/app/preflight/checks_test.go +++ b/cmd/kubeadm/app/preflight/checks_test.go @@ -239,6 +239,17 @@ func TestRunChecks(t *testing.T) { {[]Checker{FileContentCheck{Path: "/", Content: []byte("does not exist")}}, false, ""}, {[]Checker{InPathCheck{executable: "foobarbaz"}}, true, "[preflight] WARNING: foobarbaz not found in system path\n"}, {[]Checker{InPathCheck{executable: "foobarbaz", mandatory: true}}, false, ""}, + {[]Checker{ExtraArgsCheck{ + APIServerExtraArgs: map[string]string{"secure-port": "1234"}, + ControllerManagerExtraArgs: map[string]string{"use-service-account-credentials": "true"}, + SchedulerExtraArgs: map[string]string{"leader-elect": "true"}, + }}, true, ""}, + {[]Checker{ExtraArgsCheck{ + APIServerExtraArgs: map[string]string{"secure-port": "foo"}, + }}, true, "[preflight] WARNING: kube-apiserver: failed to parse extra argument 
--secure-port=foo\n"}, + {[]Checker{ExtraArgsCheck{ + APIServerExtraArgs: map[string]string{"invalid-argument": "foo"}, + }}, true, "[preflight] WARNING: kube-apiserver: failed to parse extra argument --invalid-argument=foo\n"}, } for _, rt := range tokenTest { buf := new(bytes.Buffer) diff --git a/cmd/kubeadm/app/util/BUILD b/cmd/kubeadm/app/util/BUILD index aba1277a54a..c0872062fbc 100644 --- a/cmd/kubeadm/app/util/BUILD +++ b/cmd/kubeadm/app/util/BUILD @@ -11,14 +11,21 @@ load( go_library( name = "go_default_library", srcs = [ + "apiclient.go", "error.go", "template.go", "version.go", ], tags = ["automanaged"], deps = [ + "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/preflight:go_default_library", + "//cmd/kubeadm/app/util/kubeconfig:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", ], ) @@ -45,6 +52,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//cmd/kubeadm/app/util/config:all-srcs", "//cmd/kubeadm/app/util/kubeconfig:all-srcs", "//cmd/kubeadm/app/util/token:all-srcs", ], diff --git a/cmd/kubeadm/app/master/apiclient.go b/cmd/kubeadm/app/util/apiclient.go similarity index 55% rename from cmd/kubeadm/app/master/apiclient.go rename to cmd/kubeadm/app/util/apiclient.go index 1f27fc08913..e351490a9a9 100644 --- a/cmd/kubeadm/app/master/apiclient.go +++ b/cmd/kubeadm/app/util/apiclient.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,19 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package master +package util import ( "fmt" "net/http" "time" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" ) +// CreateClientAndWaitForAPI takes a path to a kubeconfig file, makes a client of it and waits for the API to be healthy func CreateClientAndWaitForAPI(file string) (*clientset.Clientset, error) { client, err := kubeconfigutil.ClientSetFromFile(file) if err != nil { @@ -39,6 +42,7 @@ func CreateClientAndWaitForAPI(file string) (*clientset.Clientset, error) { return client, nil } +// WaitForAPI waits for the API Server's /healthz endpoint to report "ok" func WaitForAPI(client *clientset.Clientset) { start := time.Now() wait.PollInfinite(kubeadmconstants.APICallRetryInterval, func() (bool, error) { @@ -52,3 +56,30 @@ func WaitForAPI(client *clientset.Clientset) { return true, nil }) } + +// WaitForPodsWithLabel will lookup pods with the given label and wait until they are all +// reporting status as running. 
+func WaitForPodsWithLabel(client *clientset.Clientset, labelKeyValPair string) { + // TODO: Implement a timeout + // TODO: Implement a verbosity switch + wait.PollInfinite(kubeadmconstants.APICallRetryInterval, func() (bool, error) { + listOpts := metav1.ListOptions{LabelSelector: labelKeyValPair} + apiPods, err := client.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts) + if err != nil { + fmt.Printf("[apiclient] Error getting Pods with label selector %q [%v]\n", labelKeyValPair, err) + return false, nil + } + + if len(apiPods.Items) == 0 { + return false, nil + } + for _, pod := range apiPods.Items { + fmt.Printf("[apiclient] Pod %s status: %s\n", pod.Name, pod.Status.Phase) + if pod.Status.Phase != v1.PodRunning { + return false, nil + } + } + + return true, nil + }) +} diff --git a/cmd/kubeadm/app/util/config/BUILD b/cmd/kubeadm/app/util/config/BUILD new file mode 100644 index 00000000000..cb06b9973c4 --- /dev/null +++ b/cmd/kubeadm/app/util/config/BUILD @@ -0,0 +1,46 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = ["masterconfig.go"], + tags = ["automanaged"], + deps = [ + "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + "//cmd/kubeadm/app/constants:go_default_library", + "//cmd/kubeadm/app/util:go_default_library", + "//cmd/kubeadm/app/util/token:go_default_library", + "//pkg/api:go_default_library", + "//pkg/util/node:go_default_library", + "//pkg/util/version:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["masterconfig_test.go"], + library = ":go_default_library", + tags = ["automanaged"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + 
+filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/cmd/kubeadm/app/cmd/defaults.go b/cmd/kubeadm/app/util/config/masterconfig.go similarity index 74% rename from cmd/kubeadm/app/cmd/defaults.go rename to cmd/kubeadm/app/util/config/masterconfig.go index 937d5b8bdf9..f8b36532cc3 100644 --- a/cmd/kubeadm/app/cmd/defaults.go +++ b/cmd/kubeadm/app/util/config/masterconfig.go @@ -18,17 +18,21 @@ package cmd import ( "fmt" + "io/ioutil" "net" + "k8s.io/apimachinery/pkg/runtime" netutil "k8s.io/apimachinery/pkg/util/net" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" tokenutil "k8s.io/kubernetes/cmd/kubeadm/app/util/token" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/util/version" ) -func setInitDynamicDefaults(cfg *kubeadmapi.MasterConfiguration) error { +func SetInitDynamicDefaults(cfg *kubeadmapi.MasterConfiguration) error { // Choose the right address for the API Server to advertise. If the advertise address is localhost or 0.0.0.0, the default interface's IP address is used // This is the same logic as the API Server uses @@ -54,15 +58,6 @@ func setInitDynamicDefaults(cfg *kubeadmapi.MasterConfiguration) error { return fmt.Errorf("this version of kubeadm only supports deploying clusters with the control plane version >= %s. Current version: %s", kubeadmconstants.MinimumControlPlaneVersion.String(), cfg.KubernetesVersion) } - fmt.Printf("[init] Using Kubernetes version: %s\n", cfg.KubernetesVersion) - fmt.Printf("[init] Using Authorization modes: %v\n", cfg.AuthorizationModes) - - // Warn about the limitations with the current cloudprovider solution. 
- if cfg.CloudProvider != "" { - fmt.Println("[init] WARNING: For cloudprovider integrations to work --cloud-provider must be set for all kubelets in the cluster.") - fmt.Println("\t(/etc/systemd/system/kubelet.service.d/10-kubeadm.conf should be edited for this purpose)") - } - if cfg.Token == "" { var err error cfg.Token, err = tokenutil.GenerateToken() @@ -71,5 +66,23 @@ func setInitDynamicDefaults(cfg *kubeadmapi.MasterConfiguration) error { } } + cfg.NodeName = node.GetHostname(cfg.NodeName) + + return nil +} + +// TryLoadMasterConfiguration tries to loads a Master configuration from the given file (if defined) +func TryLoadMasterConfiguration(cfgPath string, cfg *kubeadmapi.MasterConfiguration) error { + + if cfgPath != "" { + b, err := ioutil.ReadFile(cfgPath) + if err != nil { + return fmt.Errorf("unable to read config from %q [%v]", cfgPath, err) + } + if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), b, cfg); err != nil { + return fmt.Errorf("unable to decode config from %q [%v]", cfgPath, err) + } + } + return nil } diff --git a/cmd/kubeadm/app/cmd/defaults_test.go b/cmd/kubeadm/app/util/config/masterconfig_test.go similarity index 100% rename from cmd/kubeadm/app/cmd/defaults_test.go rename to cmd/kubeadm/app/util/config/masterconfig_test.go diff --git a/cmd/kubeadm/app/util/version.go b/cmd/kubeadm/app/util/version.go index 8f2bf7d44ef..ae442701aa0 100644 --- a/cmd/kubeadm/app/util/version.go +++ b/cmd/kubeadm/app/util/version.go @@ -26,7 +26,7 @@ import ( var ( kubeReleaseBucketURL = "https://storage.googleapis.com/kubernetes-release/release" - kubeReleaseRegex = regexp.MustCompile(`^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)([-0-9a-zA-Z_\.+]*)?$`) + kubeReleaseRegex = regexp.MustCompile(`^v?(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)([-0-9a-zA-Z_\.+]*)?$`) kubeReleaseLabelRegex = regexp.MustCompile(`^[[:lower:]]+(-[-\w_\.]+)?$`) ) @@ -49,7 +49,10 @@ var ( // latest-1.0 (and similarly 1.1, 1.2, 1.3, ...) 
func KubernetesReleaseVersion(version string) (string, error) { if kubeReleaseRegex.MatchString(version) { - return version, nil + if strings.HasPrefix(version, "v") { + return version, nil + } + return "v" + version, nil } else if kubeReleaseLabelRegex.MatchString(version) { url := fmt.Sprintf("%s/%s.txt", kubeReleaseBucketURL, version) resp, err := http.Get(url) @@ -69,3 +72,14 @@ func KubernetesReleaseVersion(version string) (string, error) { } return "", fmt.Errorf("version %q doesn't match patterns for neither semantic version nor labels (stable, latest, ...)", version) } + +// KubernetesVersionToImageTag is helper function that replaces all +// non-allowed symbols in tag strings with underscores. +// Image tag can only contain lowercase and uppercase letters, digits, +// underscores, periods and dashes. +// Current usage is for CI images where all of symbols except '+' are valid, +// but function is for generic usage where input can't be always pre-validated. +func KubernetesVersionToImageTag(version string) string { + allowed := regexp.MustCompile(`[^-a-zA-Z0-9_\.]`) + return allowed.ReplaceAllString(version, "_") +} diff --git a/cmd/kubeadm/app/util/version_test.go b/cmd/kubeadm/app/util/version_test.go index 418f7b0dc1f..491ea24344a 100644 --- a/cmd/kubeadm/app/util/version_test.go +++ b/cmd/kubeadm/app/util/version_test.go @@ -45,6 +45,7 @@ func TestValidVersion(t *testing.T) { "v1.6.0-alpha.0.536+d60d9f3269288f", "v1.5.0-alpha.0.1078+1044b6822497da-pull", "v1.5.0-alpha.1.822+49b9e32fad9f32-pull-gke-gci", + "v1.6.1_coreos.0", } for _, s := range validVersions { ver, err := KubernetesReleaseVersion(s) @@ -61,8 +62,9 @@ func TestValidVersion(t *testing.T) { func TestInvalidVersion(t *testing.T) { invalidVersions := []string{ "v1.3", - "1.4.0", - "1.4.5+git", + "1.4", + "b1.4.0", + "c1.4.5+git", "something1.2", } for _, s := range invalidVersions { @@ -77,6 +79,24 @@ func TestInvalidVersion(t *testing.T) { } } +func TestValidConvenientForUserVersion(t 
*testing.T) { + validVersions := []string{ + "1.4.0", + "1.4.5+git", + "1.6.1_coreos.0", + } + for _, s := range validVersions { + ver, err := KubernetesReleaseVersion(s) + t.Log("Valid: ", s, ver, err) + if err != nil { + t.Errorf("KubernetesReleaseVersion unexpected error for version %q: %v", s, err) + } + if ver != "v"+s { + t.Errorf("KubernetesReleaseVersion should return semantic version string. %q vs. %q", s, ver) + } + } +} + func TestVersionFromNetwork(t *testing.T) { type T struct { Content string @@ -120,3 +140,28 @@ func TestVersionFromNetwork(t *testing.T) { } } } + +func TestVersionToTag(t *testing.T) { + type T struct { + input string + expected string + } + cases := []T{ + // NOP + {"", ""}, + // Official releases + {"v1.0.0", "v1.0.0"}, + // CI or custom builds + {"v10.1.2-alpha.1.100+0123456789abcdef+SOMETHING", "v10.1.2-alpha.1.100_0123456789abcdef_SOMETHING"}, + // random and invalid input: should return safe value + {"v1,0!0+üñµ", "v1_0_0____"}, + } + + for _, tc := range cases { + tag := KubernetesVersionToImageTag(tc.input) + t.Logf("KubernetesVersionToImageTag: Input: %q. Result: %q. Expected: %q", tc.input, tag, tc.expected) + if tag != tc.expected { + t.Errorf("failed KubernetesVersionToImageTag: Input: %q. Result: %q. 
Expected: %q", tc.input, tag, tc.expected) + } + } +} diff --git a/cmd/kubeadm/test/cmd/init_test.go b/cmd/kubeadm/test/cmd/init_test.go index be6a8c3e832..9ce3b95daac 100644 --- a/cmd/kubeadm/test/cmd/init_test.go +++ b/cmd/kubeadm/test/cmd/init_test.go @@ -137,3 +137,31 @@ func TestCmdInitAPIPort(t *testing.T) { kubeadmReset() } } + +func TestCmdInitArgsMixed(t *testing.T) { + if *kubeadmCmdSkip { + t.Log("kubeadm cmd tests being skipped") + t.Skip() + } + + var initTest = []struct { + args string + expected bool + }{ + {"--api-port=1000 --config=/etc/kubernets/kubeadm.config", false}, + } + + for _, rt := range initTest { + _, _, actual := RunCmd(*kubeadmPath, "init", rt.args, "--skip-preflight-checks") + if (actual == nil) != rt.expected { + t.Errorf( + "failed CmdInitArgsMixed running 'kubeadm init %s' with an error: %v\n\texpected: %t\n\t actual: %t", + rt.args, + actual, + rt.expected, + (actual == nil), + ) + } + kubeadmReset() + } +} diff --git a/cmd/kubeadm/test/cmd/join_test.go b/cmd/kubeadm/test/cmd/join_test.go index 5e82b2accb8..5d73178fe57 100644 --- a/cmd/kubeadm/test/cmd/join_test.go +++ b/cmd/kubeadm/test/cmd/join_test.go @@ -105,6 +105,34 @@ func TestCmdJoinDiscoveryToken(t *testing.T) { } } +func TestCmdJoinNodeName(t *testing.T) { + if *kubeadmCmdSkip { + t.Log("kubeadm cmd tests being skipped") + t.Skip() + } + + var initTest = []struct { + args string + expected bool + }{ + {"--node-name=foobar", false}, + } + + for _, rt := range initTest { + _, _, actual := RunCmd(*kubeadmPath, "join", rt.args, "--skip-preflight-checks") + if (actual == nil) != rt.expected { + t.Errorf( + "failed CmdJoinNodeName running 'kubeadm join %s' with an error: %v\n\texpected: %t\n\t actual: %t", + rt.args, + actual, + rt.expected, + (actual == nil), + ) + } + kubeadmReset() + } +} + func TestCmdJoinTLSBootstrapToken(t *testing.T) { if *kubeadmCmdSkip { t.Log("kubeadm cmd tests being skipped") @@ -191,3 +219,31 @@ func TestCmdJoinBadArgs(t *testing.T) { 
kubeadmReset() } } + +func TestCmdJoinArgsMixed(t *testing.T) { + if *kubeadmCmdSkip { + t.Log("kubeadm cmd tests being skipped") + t.Skip() + } + + var initTest = []struct { + args string + expected bool + }{ + {"--discovery-token=abcdef.1234567890abcdef --config=/etc/kubernets/kubeadm.config", false}, + } + + for _, rt := range initTest { + _, _, actual := RunCmd(*kubeadmPath, "join", rt.args, "--skip-preflight-checks") + if (actual == nil) != rt.expected { + t.Errorf( + "failed CmdJoinArgsMixed running 'kubeadm join %s' with an error: %v\n\texpected: %t\n\t actual: %t", + rt.args, + actual, + rt.expected, + (actual == nil), + ) + } + kubeadmReset() + } +} diff --git a/cmd/kubelet/app/BUILD b/cmd/kubelet/app/BUILD index 8744422e8f8..85bcffab74e 100644 --- a/cmd/kubelet/app/BUILD +++ b/cmd/kubelet/app/BUILD @@ -107,7 +107,6 @@ go_library( "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/golang.org/x/exp/inotify:go_default_library", - "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 82898919efc..4a5ccf8141d 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -19,8 +19,6 @@ package app import ( "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" "errors" "fmt" "math/rand" @@ -37,7 +35,6 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" - certificates "k8s.io/api/certificates/v1beta1" "k8s.io/api/core/v1" clientv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -420,13 +417,15 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (err error) { } if kubeDeps == nil { - var kubeClient clientset.Interface - var eventClient v1core.EventsGetter - var externalKubeClient 
clientgoclientset.Interface - var cloud cloudprovider.Interface + kubeDeps, err = UnsecuredKubeletDeps(s) + if err != nil { + return err + } + } + if kubeDeps.Cloud == nil { if !cloudprovider.IsExternal(s.CloudProvider) && s.CloudProvider != componentconfigv1alpha1.AutoDetectCloudProvider { - cloud, err = cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile) + cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile) if err != nil { return err } @@ -435,29 +434,33 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (err error) { } else { glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile) } + kubeDeps.Cloud = cloud } + } - nodeName, err := getNodeName(cloud, nodeutil.GetHostname(s.HostnameOverride)) - if err != nil { + nodeName, err := getNodeName(kubeDeps.Cloud, nodeutil.GetHostname(s.HostnameOverride)) + if err != nil { + return err + } + + if s.BootstrapKubeconfig != "" { + if err := bootstrapClientCert(s.KubeConfig.Value(), s.BootstrapKubeconfig, s.CertDirectory, nodeName); err != nil { return err } + } - if s.BootstrapKubeconfig != "" { - if err := bootstrapClientCert(s.KubeConfig.Value(), s.BootstrapKubeconfig, s.CertDirectory, nodeName); err != nil { - return err - } - } + // initialize clients if any of the clients are not provided + if kubeDeps.KubeClient == nil || kubeDeps.ExternalKubeClient == nil || kubeDeps.EventClient == nil { + var kubeClient clientset.Interface + var eventClient v1core.EventsGetter + var externalKubeClient clientgoclientset.Interface clientConfig, err := CreateAPIServerClientConfig(s) var clientCertificateManager certificate.Manager if err == nil { if utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletClientCertificate) { - nodeName, err := getNodeName(cloud, nodeutil.GetHostname(s.HostnameOverride)) - if err != nil { - return err - } - clientCertificateManager, err = 
initializeClientCertificateManager(s.CertDirectory, nodeName, clientConfig.CertData, clientConfig.KeyData, clientConfig.CertFile, clientConfig.KeyFile) + clientCertificateManager, err = certificate.NewKubeletClientCertificateManager(s.CertDirectory, nodeName, clientConfig.CertData, clientConfig.KeyData, clientConfig.CertFile, clientConfig.KeyFile) if err != nil { return err } @@ -487,32 +490,21 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (err error) { glog.Warningf("Failed to create API Server client: %v", err) } } else { - if s.RequireKubeConfig { + switch { + case s.RequireKubeConfig: return fmt.Errorf("invalid kubeconfig: %v", err) - } else if s.KubeConfig.Provided() && !standaloneMode { + case standaloneMode: + glog.Warningf("No API client: %v", err) + case s.KubeConfig.Provided(): glog.Warningf("Invalid kubeconfig: %v", err) } - if standaloneMode { - glog.Warningf("No API client: %v", err) - } } - kubeDeps, err = UnsecuredKubeletDeps(s) - if err != nil { - return err - } - - kubeDeps.Cloud = cloud kubeDeps.KubeClient = kubeClient kubeDeps.ExternalKubeClient = externalKubeClient kubeDeps.EventClient = eventClient } - nodeName, err := getNodeName(kubeDeps.Cloud, nodeutil.GetHostname(s.HostnameOverride)) - if err != nil { - return err - } - if kubeDeps.Auth == nil { auth, err := BuildAuth(nodeName, kubeDeps.ExternalKubeClient, s.KubeletConfiguration) if err != nil { @@ -660,52 +652,6 @@ func updateTransport(clientConfig *restclient.Config, clientCertificateManager c return nil } -// initializeClientCertificateManager sets up a certificate manager without a -// client that can be used to sign new certificates (or rotate). It answers with -// whatever certificate it is initialized with. 
If a CSR client is set later, it -// may begin rotating/renewing the client cert -func initializeClientCertificateManager(certDirectory string, nodeName types.NodeName, certData []byte, keyData []byte, certFile string, keyFile string) (certificate.Manager, error) { - certificateStore, err := certificate.NewFileStore( - "kubelet-client", - certDirectory, - certDirectory, - certFile, - keyFile) - if err != nil { - return nil, fmt.Errorf("failed to initialize certificate store: %v", err) - } - clientCertificateManager, err := certificate.NewManager(&certificate.Config{ - Template: &x509.CertificateRequest{ - Subject: pkix.Name{ - Organization: []string{"system:nodes"}, - CommonName: fmt.Sprintf("system:node:%s", nodeName), - }, - }, - Usages: []certificates.KeyUsage{ - // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // - // DigitalSignature allows the certificate to be used to verify - // digital signatures including signatures used during TLS - // negotiation. - certificates.UsageDigitalSignature, - // KeyEncipherment allows the cert/key pair to be used to encrypt - // keys, including the symetric keys negotiated during TLS setup - // and used for data transfer.. - certificates.UsageKeyEncipherment, - // ClientAuth allows the cert to be used by a TLS client to - // authenticate itself to the TLS server. - certificates.UsageClientAuth, - }, - CertificateStore: certificateStore, - BootstrapCertificatePEM: certData, - BootstrapKeyPEM: keyData, - }) - if err != nil { - return nil, fmt.Errorf("failed to initialize certificate manager: %v", err) - } - return clientCertificateManager, nil -} - // getNodeName returns the node name according to the cloud provider // if cloud provider is specified. Otherwise, returns the hostname of the node. 
func getNodeName(cloud cloudprovider.Interface, hostname string) (types.NodeName, error) { diff --git a/cmd/libs/go2idl/client-gen/test_apis/testgroup/register.go b/cmd/libs/go2idl/client-gen/test_apis/testgroup/register.go index df25d849b1c..b04486dae75 100644 --- a/cmd/libs/go2idl/client-gen/test_apis/testgroup/register.go +++ b/cmd/libs/go2idl/client-gen/test_apis/testgroup/register.go @@ -38,6 +38,3 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion) return nil } - -func (obj *TestType) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *TestTypeList) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/cmd/libs/go2idl/client-gen/test_apis/testgroup/v1/register.go b/cmd/libs/go2idl/client-gen/test_apis/testgroup/v1/register.go index 98c057c42e6..dbf299f0dfe 100644 --- a/cmd/libs/go2idl/client-gen/test_apis/testgroup/v1/register.go +++ b/cmd/libs/go2idl/client-gen/test_apis/testgroup/v1/register.go @@ -52,6 +52,3 @@ func addKnownTypes(scheme *runtime.Scheme) error { metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } - -func (obj *TestType) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *TestTypeList) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/docs/.generated_docs b/docs/.generated_docs index 1b82fae7c74..8d806a5bee4 100644 --- a/docs/.generated_docs +++ b/docs/.generated_docs @@ -1,4 +1,5 @@ docs/.generated_docs +docs/admin/cloud-controller-manager.md docs/admin/federation-apiserver.md docs/admin/federation-controller-manager.md docs/admin/kube-apiserver.md @@ -6,6 +7,7 @@ docs/admin/kube-controller-manager.md docs/admin/kube-proxy.md docs/admin/kube-scheduler.md docs/admin/kubelet.md +docs/man/man1/cloud-controller-manager.1 docs/man/man1/kube-apiserver.1 docs/man/man1/kube-controller-manager.1 docs/man/man1/kube-proxy.1 diff --git a/docs/admin/cloud-controller-manager.md b/docs/admin/cloud-controller-manager.md new 
file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/admin/cloud-controller-manager.md @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/api-reference/extensions/v1beta1/definitions.html b/docs/api-reference/extensions/v1beta1/definitions.html index 9b95b7fa0f7..ad065bdb75a 100755 --- a/docs/api-reference/extensions/v1beta1/definitions.html +++ b/docs/api-reference/extensions/v1beta1/definitions.html @@ -382,12 +382,6 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

v1beta1.Scale

  • -

    v1beta1.ThirdPartyResource

    -
  • -
  • -

    v1beta1.ThirdPartyResourceList

    -
  • -
  • v1beta1.DaemonSetList

  • @@ -3252,40 +3246,6 @@ When an object is created, the system will populate this list with the current s - -
    -

    v1beta1.APIVersion

    -
    -

    An APIVersion represents a single concrete version of an object model.

    -
    - ------- - - - - - - - - - - - - - - - - - - -
    NameDescriptionRequiredSchemaDefault

    name

    Name of this version (e.g. v1).

    false

    string

    -

    v1beta1.SupplementalGroupsStrategyOptions

    @@ -5817,68 +5777,6 @@ Examples:
    -
    -
    -

    v1beta1.ThirdPartyResource

    -
    -

    A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource types to the API. It consists of one or more Versions of the api.

    -
    - ------- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameDescriptionRequiredSchemaDefault

    kind

    Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds

    false

    string

    apiVersion

    APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources

    false

    string

    metadata

    Standard object metadata

    false

    v1.ObjectMeta

    description

    Description is the description of this object.

    false

    string

    versions

    Versions are versions for this third party object

    false

    v1beta1.APIVersion array

    -

    v1.DeletionPropagation

    @@ -8152,61 +8050,6 @@ Both these may change in the future. Incoming requests are matched against the h -
    -
    -

    v1beta1.ThirdPartyResourceList

    -
    -

    ThirdPartyResourceList is a list of ThirdPartyResources.

    -
    - ------- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameDescriptionRequiredSchemaDefault

    kind

    Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds

    false

    string

    apiVersion

    APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources

    false

    string

    metadata

    Standard list metadata.

    false

    v1.ListMeta

    items

    Items is the list of ThirdPartyResources.

    true

    v1beta1.ThirdPartyResource array

    -

    v1.AzureDataDiskCachingMode

    diff --git a/docs/api-reference/extensions/v1beta1/operations.html b/docs/api-reference/extensions/v1beta1/operations.html index d51388f77f6..acd272ffe73 100755 --- a/docs/api-reference/extensions/v1beta1/operations.html +++ b/docs/api-reference/extensions/v1beta1/operations.html @@ -9886,10 +9886,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    list or watch objects of kind ThirdPartyResource

    +

    watch individual changes to a list of DaemonSet

    -
    GET /apis/extensions/v1beta1/thirdpartyresources
    +
    GET /apis/extensions/v1beta1/watch/daemonsets
    @@ -9993,7 +9993,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

    200

    success

    -

    v1beta1.ThirdPartyResourceList

    +

    v1.WatchEvent

    @@ -10043,10 +10043,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    delete collection of ThirdPartyResource

    +

    watch individual changes to a list of Deployment

    -
    DELETE /apis/extensions/v1beta1/thirdpartyresources
    +
    GET /apis/extensions/v1beta1/watch/deployments
    @@ -10150,7 +10150,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

    200

    success

    -

    v1.Status

    +

    v1.WatchEvent

    @@ -10179,6 +10179,12 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
  • application/vnd.kubernetes.protobuf

  • +
  • +

    application/json;stream=watch

    +
  • +
  • +

    application/vnd.kubernetes.protobuf;stream=watch

    +
  • @@ -10194,10 +10200,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    create a ThirdPartyResource

    +

    watch individual changes to a list of Ingress

    -
    POST /apis/extensions/v1beta1/thirdpartyresources
    +
    GET /apis/extensions/v1beta1/watch/ingresses
    @@ -10231,11 +10237,51 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; } -

    BodyParameter

    -

    body

    +

    QueryParameter

    +

    labelSelector

    +

    A selector to restrict the list of returned objects by their labels. Defaults to everything.

    +

    false

    +

    string

    -

    true

    -

    v1beta1.ThirdPartyResource

    + + +

    QueryParameter

    +

    fieldSelector

    +

    A selector to restrict the list of returned objects by their fields. Defaults to everything.

    +

    false

    +

    string

    + + + +

    QueryParameter

    +

    includeUninitialized

    +

    If true, partially initialized resources are included in the response.

    +

    false

    +

    boolean

    + + + +

    QueryParameter

    +

    watch

    +

    Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.

    +

    false

    +

    boolean

    + + + +

    QueryParameter

    +

    resourceVersion

    +

    When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it’s 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.

    +

    false

    +

    string

    + + + +

    QueryParameter

    +

    timeoutSeconds

    +

    Timeout for the list/watch call.

    +

    false

    +

    integer (int32)

    @@ -10261,7 +10307,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

    200

    success

    -

    v1beta1.ThirdPartyResource

    +

    v1.WatchEvent

    @@ -10290,6 +10336,12 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
  • application/vnd.kubernetes.protobuf

  • +
  • +

    application/json;stream=watch

    +
  • +
  • +

    application/vnd.kubernetes.protobuf;stream=watch

    +
  • @@ -10305,991 +10357,6 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    read the specified ThirdPartyResource

    -
    -
    -
    GET /apis/extensions/v1beta1/thirdpartyresources/{name}
    -
    -
    -
    -

    Parameters

    - -------- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    TypeNameDescriptionRequiredSchemaDefault

    QueryParameter

    pretty

    If true, then the output is pretty printed.

    false

    string

    QueryParameter

    export

    Should this value be exported. Export strips fields that a user can not specify.

    false

    boolean

    QueryParameter

    exact

    Should the export be exact. Exact export maintains cluster-specific fields like Namespace.

    false

    boolean

    PathParameter

    name

    name of the ThirdPartyResource

    true

    string

    - -
    -
    -

    Responses

    - ----- - - - - - - - - - - - - - - -
    HTTP CodeDescriptionSchema

    200

    success

    v1beta1.ThirdPartyResource

    - -
    -
    -

    Consumes

    -
    -
      -
    • -

      /

      -
    • -
    -
    -
    -
    -

    Produces

    -
    -
      -
    • -

      application/json

      -
    • -
    • -

      application/yaml

      -
    • -
    • -

      application/vnd.kubernetes.protobuf

      -
    • -
    -
    -
    -
    -

    Tags

    -
    -
      -
    • -

      apisextensionsv1beta1

      -
    • -
    -
    -
    -
    -
    -

    replace the specified ThirdPartyResource

    -
    -
    -
    PUT /apis/extensions/v1beta1/thirdpartyresources/{name}
    -
    -
    -
    -

    Parameters

    - -------- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    TypeNameDescriptionRequiredSchemaDefault

    QueryParameter

    pretty

    If true, then the output is pretty printed.

    false

    string

    BodyParameter

    body

    true

    v1beta1.ThirdPartyResource

    PathParameter

    name

    name of the ThirdPartyResource

    true

    string

    - -
    -
    -

    Responses

    - ----- - - - - - - - - - - - - - - -
    HTTP CodeDescriptionSchema

    200

    success

    v1beta1.ThirdPartyResource

    - -
    -
    -

    Consumes

    -
    -
      -
    • -

      /

      -
    • -
    -
    -
    -
    -

    Produces

    -
    -
      -
    • -

      application/json

      -
    • -
    • -

      application/yaml

      -
    • -
    • -

      application/vnd.kubernetes.protobuf

      -
    • -
    -
    -
    -
    -

    Tags

    -
    -
      -
    • -

      apisextensionsv1beta1

      -
    • -
    -
    -
    -
    -
    -

    delete a ThirdPartyResource

    -
    -
    -
    DELETE /apis/extensions/v1beta1/thirdpartyresources/{name}
    -
    -
    -
    -

    Parameters

    - -------- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    TypeNameDescriptionRequiredSchemaDefault

    QueryParameter

    pretty

    If true, then the output is pretty printed.

    false

    string

    BodyParameter

    body

    true

    v1.DeleteOptions

    QueryParameter

    gracePeriodSeconds

    The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.

    false

    integer (int32)

    QueryParameter

    orphanDependents

    Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object’s finalizers list. Either this field or PropagationPolicy may be set, but not both.

    false

    boolean

    QueryParameter

    propagationPolicy

    Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.

    false

    string

    PathParameter

    name

    name of the ThirdPartyResource

    true

    string

    - -
    -
    -

    Responses

    - ----- - - - - - - - - - - - - - - -
    HTTP CodeDescriptionSchema

    200

    success

    v1.Status

    - -
    -
    -

    Consumes

    -
    -
      -
    • -

      /

      -
    • -
    -
    -
    -
    -

    Produces

    -
    -
      -
    • -

      application/json

      -
    • -
    • -

      application/yaml

      -
    • -
    • -

      application/vnd.kubernetes.protobuf

      -
    • -
    -
    -
    -
    -

    Tags

    -
    -
      -
    • -

      apisextensionsv1beta1

      -
    • -
    -
    -
    -
    -
    -

    partially update the specified ThirdPartyResource

    -
    -
    -
    PATCH /apis/extensions/v1beta1/thirdpartyresources/{name}
    -
    -
    -
    -

    Parameters

    - -------- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    TypeNameDescriptionRequiredSchemaDefault

    QueryParameter

    pretty

    If true, then the output is pretty printed.

    false

    string

    BodyParameter

    body

    true

    v1.Patch

    PathParameter

    name

    name of the ThirdPartyResource

    true

    string

    - -
    -
    -

    Responses

    - ----- - - - - - - - - - - - - - - -
    HTTP CodeDescriptionSchema

    200

    success

    v1beta1.ThirdPartyResource

    - -
    -
    -

    Consumes

    -
    -
      -
    • -

      application/json-patch+json

      -
    • -
    • -

      application/merge-patch+json

      -
    • -
    • -

      application/strategic-merge-patch+json

      -
    • -
    -
    -
    -
    -

    Produces

    -
    -
      -
    • -

      application/json

      -
    • -
    • -

      application/yaml

      -
    • -
    • -

      application/vnd.kubernetes.protobuf

      -
    • -
    -
    -
    -
    -

    Tags

    -
    -
      -
    • -

      apisextensionsv1beta1

      -
    • -
    -
    -
    -
    -
    -

    watch individual changes to a list of DaemonSet

    -
    -
    -
    GET /apis/extensions/v1beta1/watch/daemonsets
    -
    -
    -
    -

    Parameters

    - -------- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    TypeNameDescriptionRequiredSchemaDefault

    QueryParameter

    pretty

    If true, then the output is pretty printed.

    false

    string

    QueryParameter

    labelSelector

    A selector to restrict the list of returned objects by their labels. Defaults to everything.

    false

    string

    QueryParameter

    fieldSelector

    A selector to restrict the list of returned objects by their fields. Defaults to everything.

    false

    string

    QueryParameter

    includeUninitialized

    If true, partially initialized resources are included in the response.

    false

    boolean

    QueryParameter

    watch

    Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.

    false

    boolean

    QueryParameter

    resourceVersion

    When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it’s 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.

    false

    string

    QueryParameter

    timeoutSeconds

    Timeout for the list/watch call.

    false

    integer (int32)

    - -
    -
    -

    Responses

    - ----- - - - - - - - - - - - - - - -
    HTTP CodeDescriptionSchema

    200

    success

    v1.WatchEvent

    - -
    -
    -

    Consumes

    -
    -
      -
    • -

      /

      -
    • -
    -
    -
    -
    -

    Produces

    -
    -
      -
    • -

      application/json

      -
    • -
    • -

      application/yaml

      -
    • -
    • -

      application/vnd.kubernetes.protobuf

      -
    • -
    • -

      application/json;stream=watch

      -
    • -
    • -

      application/vnd.kubernetes.protobuf;stream=watch

      -
    • -
    -
    -
    -
    -

    Tags

    -
    -
      -
    • -

      apisextensionsv1beta1

      -
    • -
    -
    -
    -
    -
    -

    watch individual changes to a list of Deployment

    -
    -
    -
    GET /apis/extensions/v1beta1/watch/deployments
    -
    -
    -
    -

    Parameters

    - -------- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    TypeNameDescriptionRequiredSchemaDefault

    QueryParameter

    pretty

    If true, then the output is pretty printed.

    false

    string

    QueryParameter

    labelSelector

    A selector to restrict the list of returned objects by their labels. Defaults to everything.

    false

    string

    QueryParameter

    fieldSelector

    A selector to restrict the list of returned objects by their fields. Defaults to everything.

    false

    string

    QueryParameter

    includeUninitialized

    If true, partially initialized resources are included in the response.

    false

    boolean

    QueryParameter

    watch

    Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.

    false

    boolean

    QueryParameter

    resourceVersion

    When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it’s 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.

    false

    string

    QueryParameter

    timeoutSeconds

    Timeout for the list/watch call.

    false

    integer (int32)

    - -
    -
    -

    Responses

    - ----- - - - - - - - - - - - - - - -
    HTTP CodeDescriptionSchema

    200

    success

    v1.WatchEvent

    - -
    -
    -

    Consumes

    -
    -
      -
    • -

      /

      -
    • -
    -
    -
    -
    -

    Produces

    -
    -
      -
    • -

      application/json

      -
    • -
    • -

      application/yaml

      -
    • -
    • -

      application/vnd.kubernetes.protobuf

      -
    • -
    • -

      application/json;stream=watch

      -
    • -
    • -

      application/vnd.kubernetes.protobuf;stream=watch

      -
    • -
    -
    -
    -
    -

    Tags

    -
    -
      -
    • -

      apisextensionsv1beta1

      -
    • -
    -
    -
    -
    -
    -

    watch individual changes to a list of Ingress

    -
    -
    -
    GET /apis/extensions/v1beta1/watch/ingresses
    -
    -
    -
    -

    Parameters

    - -------- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    TypeNameDescriptionRequiredSchemaDefault

    QueryParameter

    pretty

    If true, then the output is pretty printed.

    false

    string

    QueryParameter

    labelSelector

    A selector to restrict the list of returned objects by their labels. Defaults to everything.

    false

    string

    QueryParameter

    fieldSelector

    A selector to restrict the list of returned objects by their fields. Defaults to everything.

    false

    string

    QueryParameter

    includeUninitialized

    If true, partially initialized resources are included in the response.

    false

    boolean

    QueryParameter

    watch

    Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.

    false

    boolean

    QueryParameter

    resourceVersion

    When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it’s 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.

    false

    string

    QueryParameter

    timeoutSeconds

    Timeout for the list/watch call.

    false

    integer (int32)

    - -
    -
    -

    Responses

    - ----- - - - - - - - - - - - - - - -
    HTTP CodeDescriptionSchema

    200

    success

    v1.WatchEvent

    - -
    -
    -

    Consumes

    -
    -
      -
    • -

      /

      -
    • -
    -
    -
    -
    -

    Produces

    -
    -
      -
    • -

      application/json

      -
    • -
    • -

      application/yaml

      -
    • -
    • -

      application/vnd.kubernetes.protobuf

      -
    • -
    • -

      application/json;stream=watch

      -
    • -
    • -

      application/vnd.kubernetes.protobuf;stream=watch

      -
    • -
    -
    -
    -
    -

    Tags

    -
    -
      -
    • -

      apisextensionsv1beta1

      -
    • -
    -
    -
    -
    -

    watch individual changes to a list of DaemonSet

    @@ -11297,7 +10364,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Parameters

    +

    Parameters

    @@ -11387,7 +10454,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Responses

    +

    Responses

    @@ -11412,7 +10479,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Consumes

    +

    Consumes

    • @@ -11422,7 +10489,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Produces

    +

    Produces

    • @@ -11444,7 +10511,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Tags

    +

    Tags

    • @@ -11462,7 +10529,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Parameters

    +

    Parameters

    @@ -11560,7 +10627,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Responses

    +

    Responses

    @@ -11585,7 +10652,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Consumes

    +

    Consumes

    • @@ -11595,7 +10662,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Produces

    +

    Produces

    • @@ -11617,7 +10684,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Tags

    +

    Tags

    • @@ -11635,7 +10702,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Parameters

    +

    Parameters

    @@ -11725,7 +10792,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Responses

    +

    Responses

    @@ -11750,7 +10817,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Consumes

    +

    Consumes

    • @@ -11760,7 +10827,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Produces

    +

    Produces

    • @@ -11782,7 +10849,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Tags

    +

    Tags

    • @@ -11800,7 +10867,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Parameters

    +

    Parameters

    @@ -11898,7 +10965,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Responses

    +

    Responses

    @@ -11923,7 +10990,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Consumes

    +

    Consumes

    • @@ -11933,7 +11000,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Produces

    +

    Produces

    • @@ -11955,7 +11022,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Tags

    +

    Tags

    • @@ -11973,7 +11040,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Parameters

    +

    Parameters

    @@ -12063,7 +11130,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Responses

    +

    Responses

    @@ -12088,7 +11155,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Consumes

    +

    Consumes

    • @@ -12098,7 +11165,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Produces

    +

    Produces

    • @@ -12120,7 +11187,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Tags

    +

    Tags

    • @@ -12138,7 +11205,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Parameters

    +

    Parameters

    @@ -12236,7 +11303,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Responses

    +

    Responses

    @@ -12261,7 +11328,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Consumes

    +

    Consumes

    • @@ -12271,7 +11338,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Produces

    +

    Produces

    • @@ -12293,7 +11360,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Tags

    +

    Tags

    • @@ -12311,7 +11378,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Parameters

    +

    Parameters

    @@ -12401,7 +11468,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Responses

    +

    Responses

    @@ -12426,7 +11493,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Consumes

    +

    Consumes

    • @@ -12436,7 +11503,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Produces

    +

    Produces

    • @@ -12458,7 +11525,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Tags

    +

    Tags

    • @@ -12476,7 +11543,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Parameters

    +

    Parameters

    @@ -12574,7 +11641,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Responses

    +

    Responses

    @@ -12599,7 +11666,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Consumes

    +

    Consumes

    • @@ -12609,7 +11676,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Produces

    +

    Produces

    • @@ -12631,7 +11698,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Tags

    +

    Tags

    • @@ -12649,7 +11716,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Parameters

    +

    Parameters

    @@ -12739,7 +11806,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Responses

    +

    Responses

    @@ -12764,7 +11831,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Consumes

    +

    Consumes

    • @@ -12774,7 +11841,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Produces

    +

    Produces

    • @@ -12796,7 +11863,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Tags

    +

    Tags

    • @@ -12814,7 +11881,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Parameters

    +

    Parameters

    @@ -12912,7 +11979,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Responses

    +

    Responses

    @@ -12937,7 +12004,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Consumes

    +

    Consumes

    • @@ -12947,7 +12014,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Produces

    +

    Produces

    • @@ -12969,7 +12036,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Tags

    +

    Tags

    • @@ -12987,7 +12054,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Parameters

    +

    Parameters

    @@ -13069,7 +12136,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Responses

    +

    Responses

    @@ -13094,7 +12161,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Consumes

    +

    Consumes

    • @@ -13104,7 +12171,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Produces

    +

    Produces

    • @@ -13126,7 +12193,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Tags

    +

    Tags

    • @@ -13144,7 +12211,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Parameters

    +

    Parameters

    @@ -13226,7 +12293,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Responses

    +

    Responses

    @@ -13251,7 +12318,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Consumes

    +

    Consumes

    • @@ -13261,7 +12328,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Produces

    +

    Produces

    • @@ -13283,7 +12350,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Tags

    +

    Tags

    • @@ -13301,7 +12368,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Parameters

    +

    Parameters

    @@ -13391,7 +12458,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Responses

    +

    Responses

    @@ -13416,7 +12483,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Consumes

    +

    Consumes

    • @@ -13426,7 +12493,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Produces

    +

    Produces

    • @@ -13448,7 +12515,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Tags

    +

    Tags

    • @@ -13466,7 +12533,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Parameters

    +

    Parameters

    @@ -13548,7 +12615,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Responses

    +

    Responses

    @@ -13573,7 +12640,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Consumes

    +

    Consumes

    • @@ -13583,7 +12650,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Produces

    +

    Produces

    • @@ -13605,329 +12672,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
    -

    Tags

    -
    -
      -
    • -

      apisextensionsv1beta1

      -
    • -
    -
    -
    - -
    -

    watch individual changes to a list of ThirdPartyResource

    -
    -
    -
    GET /apis/extensions/v1beta1/watch/thirdpartyresources
    -
    -
    -
    -

    Parameters

    -
    -------- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    TypeNameDescriptionRequiredSchemaDefault

    QueryParameter

    pretty

    If true, then the output is pretty printed.

    false

    string

    QueryParameter

    labelSelector

    A selector to restrict the list of returned objects by their labels. Defaults to everything.

    false

    string

    QueryParameter

    fieldSelector

    A selector to restrict the list of returned objects by their fields. Defaults to everything.

    false

    string

    QueryParameter

    includeUninitialized

    If true, partially initialized resources are included in the response.

    false

    boolean

    QueryParameter

    watch

    Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.

    false

    boolean

    QueryParameter

    resourceVersion

    When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it’s 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.

    false

    string

    QueryParameter

    timeoutSeconds

    Timeout for the list/watch call.

    false

    integer (int32)

    - -
    -
    -

    Responses

    - ----- - - - - - - - - - - - - - - -
    HTTP CodeDescriptionSchema

    200

    success

    v1.WatchEvent

    - -
    -
    -

    Consumes

    -
    -
      -
    • -

      /

      -
    • -
    -
    -
    -
    -

    Produces

    -
    -
      -
    • -

      application/json

      -
    • -
    • -

      application/yaml

      -
    • -
    • -

      application/vnd.kubernetes.protobuf

      -
    • -
    • -

      application/json;stream=watch

      -
    • -
    • -

      application/vnd.kubernetes.protobuf;stream=watch

      -
    • -
    -
    -
    -
    -

    Tags

    -
    -
      -
    • -

      apisextensionsv1beta1

      -
    • -
    -
    -
    -
    -
    -

    watch changes to an object of kind ThirdPartyResource

    -
    -
    -
    GET /apis/extensions/v1beta1/watch/thirdpartyresources/{name}
    -
    -
    -
    -

    Parameters

    - -------- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    TypeNameDescriptionRequiredSchemaDefault

    QueryParameter

    pretty

    If true, then the output is pretty printed.

    false

    string

    QueryParameter

    labelSelector

    A selector to restrict the list of returned objects by their labels. Defaults to everything.

    false

    string

    QueryParameter

    fieldSelector

    A selector to restrict the list of returned objects by their fields. Defaults to everything.

    false

    string

    QueryParameter

    includeUninitialized

    If true, partially initialized resources are included in the response.

    false

    boolean

    QueryParameter

    watch

    Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.

    false

    boolean

    QueryParameter

    resourceVersion

    When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it’s 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.

    false

    string

    QueryParameter

    timeoutSeconds

    Timeout for the list/watch call.

    false

    integer (int32)

    PathParameter

    name

    name of the ThirdPartyResource

    true

    string

    - -
    -
    -

    Responses

    - ----- - - - - - - - - - - - - - - -
    HTTP CodeDescriptionSchema

    200

    success

    v1.WatchEvent

    - -
    -
    -

    Consumes

    -
    -
      -
    • -

      /

      -
    • -
    -
    -
    -
    -

    Produces

    -
    -
      -
    • -

      application/json

      -
    • -
    • -

      application/yaml

      -
    • -
    • -

      application/vnd.kubernetes.protobuf

      -
    • -
    • -

      application/json;stream=watch

      -
    • -
    • -

      application/vnd.kubernetes.protobuf;stream=watch

      -
    • -
    -
    -
    -
    -

    Tags

    +

    Tags

    • diff --git a/docs/man/man1/cloud-controller-manager.1 b/docs/man/man1/cloud-controller-manager.1 new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/man/man1/cloud-controller-manager.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/examples/README.md b/examples/README.md index 7bfd41116f2..6ea45f7007a 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,29 +1 @@ -# Kubernetes Examples: releases.k8s.io/HEAD - -This directory contains a number of examples of how to run -real applications with Kubernetes. - -Demonstrations of how to use specific Kubernetes features can be found in our [documents](https://kubernetes.io/docs/). - - -### Maintained Examples - -Maintained Examples are expected to be updated with every Kubernetes -release, to use the latest and greatest features, current guidelines -and best practices, and to refresh command syntax, output, changed -prerequisites, as needed. - -|Name | Description | Notable Features Used | Complexity Level| -------------- | ------------- | ------------ | ------------ | -|[Guestbook](guestbook/) | PHP app with Redis | Replication Controller, Service | Beginner | -|[WordPress](mysql-wordpress-pd/) | WordPress with MySQL | Deployment, Persistent Volume with Claim | Beginner| -|[Cassandra](storage/cassandra/) | Cloud Native Cassandra | Daemon Set | Intermediate - -* Note: Please add examples to the list above that are maintained. - -See [Example Guidelines](guidelines.md) for a description of what goes -in this directory, and what examples should contain. 
- - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/README.md](https://github.com/kubernetes/examples/blob/master/README.md) diff --git a/examples/cluster-dns/README.md b/examples/cluster-dns/README.md index 86d2aa9ab7c..c7b23c62710 100644 --- a/examples/cluster-dns/README.md +++ b/examples/cluster-dns/README.md @@ -1,182 +1 @@ -## Kubernetes DNS example - -This is a toy example demonstrating how to use kubernetes DNS. - -### Step Zero: Prerequisites - -This example assumes that you have forked the repository and [turned up a Kubernetes cluster](https://kubernetes.io/docs/getting-started-guides/). Make sure DNS is enabled in your setup, see [DNS doc](https://github.com/kubernetes/dns). - -```sh -$ cd kubernetes -$ hack/dev-build-and-up.sh -``` - -### Step One: Create two namespaces - -We'll see how cluster DNS works across multiple [namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), first we need to create two namespaces: - -```sh -$ kubectl create -f examples/cluster-dns/namespace-dev.yaml -$ kubectl create -f examples/cluster-dns/namespace-prod.yaml -``` - -Now list all namespaces: - -```sh -$ kubectl get namespaces -NAME LABELS STATUS -default Active -development name=development Active -production name=production Active -``` - -For kubectl client to work with each namespace, we define two contexts: - -```sh -$ kubectl config set-context dev --namespace=development --cluster=${CLUSTER_NAME} --user=${USER_NAME} -$ kubectl config set-context prod --namespace=production --cluster=${CLUSTER_NAME} --user=${USER_NAME} -``` - -You can view your cluster name and user name in kubernetes config at ~/.kube/config. 
- -### Step Two: Create backend replication controller in each namespace - -Use the file [`examples/cluster-dns/dns-backend-rc.yaml`](dns-backend-rc.yaml) to create a backend server [replication controller](https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/) in each namespace. - -```sh -$ kubectl config use-context dev -$ kubectl create -f examples/cluster-dns/dns-backend-rc.yaml -``` - -Once that's up you can list the pod in the cluster: - -```sh -$ kubectl get rc -CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS -dns-backend dns-backend ddysher/dns-backend name=dns-backend 1 -``` - -Now repeat the above commands to create a replication controller in prod namespace: - -```sh -$ kubectl config use-context prod -$ kubectl create -f examples/cluster-dns/dns-backend-rc.yaml -$ kubectl get rc -CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS -dns-backend dns-backend ddysher/dns-backend name=dns-backend 1 -``` - -### Step Three: Create backend service - -Use the file [`examples/cluster-dns/dns-backend-service.yaml`](dns-backend-service.yaml) to create -a [service](https://kubernetes.io/docs/concepts/services-networking/service/) for the backend server. 
- -```sh -$ kubectl config use-context dev -$ kubectl create -f examples/cluster-dns/dns-backend-service.yaml -``` - -Once that's up you can list the service in the cluster: - -```sh -$ kubectl get service dns-backend -NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE -dns-backend 10.0.2.3 8000/TCP name=dns-backend 1d -``` - -Again, repeat the same process for prod namespace: - -```sh -$ kubectl config use-context prod -$ kubectl create -f examples/cluster-dns/dns-backend-service.yaml -$ kubectl get service dns-backend -NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE -dns-backend 10.0.2.4 8000/TCP name=dns-backend 1d -``` - -### Step Four: Create client pod in one namespace - -Use the file [`examples/cluster-dns/dns-frontend-pod.yaml`](dns-frontend-pod.yaml) to create a client [pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/) in dev namespace. The client pod will make a connection to backend and exit. Specifically, it tries to connect to address `http://dns-backend.development.cluster.local:8000`. - -```sh -$ kubectl config use-context dev -$ kubectl create -f examples/cluster-dns/dns-frontend-pod.yaml -``` - -Once that's up you can list the pod in the cluster: - -```sh -$ kubectl get pods dns-frontend -NAME READY STATUS RESTARTS AGE -dns-frontend 0/1 ExitCode:0 0 1m -``` - -Wait until the pod succeeds, then we can see the output from the client pod: - -```sh -$ kubectl logs dns-frontend -2015-05-07T20:13:54.147664936Z 10.0.236.129 -2015-05-07T20:13:54.147721290Z Send request to: http://dns-backend.development.cluster.local:8000 -2015-05-07T20:13:54.147733438Z -2015-05-07T20:13:54.147738295Z Hello World! -``` - -Please refer to the [source code](images/frontend/client.py) about the log. First line prints out the ip address associated with the service in dev namespace; remaining lines print out our request and server response. - -If we switch to prod namespace with the same pod config, we'll see the same result, i.e. dns will resolve across namespace. 
- -```sh -$ kubectl config use-context prod -$ kubectl create -f examples/cluster-dns/dns-frontend-pod.yaml -$ kubectl logs dns-frontend -2015-05-07T20:13:54.147664936Z 10.0.236.129 -2015-05-07T20:13:54.147721290Z Send request to: http://dns-backend.development.cluster.local:8000 -2015-05-07T20:13:54.147733438Z -2015-05-07T20:13:54.147738295Z Hello World! -``` - - -#### Note about default namespace - -If you prefer not using namespace, then all your services can be addressed using `default` namespace, e.g. `http://dns-backend.default.svc.cluster.local:8000`, or shorthand version `http://dns-backend:8000` - - -### tl; dr; - -For those of you who are impatient, here is the summary of the commands we ran in this tutorial. Remember to set first `$CLUSTER_NAME` and `$USER_NAME` to the values found in `~/.kube/config`. - -```sh -# create dev and prod namespaces -kubectl create -f examples/cluster-dns/namespace-dev.yaml -kubectl create -f examples/cluster-dns/namespace-prod.yaml - -# create two contexts -kubectl config set-context dev --namespace=development --cluster=${CLUSTER_NAME} --user=${USER_NAME} -kubectl config set-context prod --namespace=production --cluster=${CLUSTER_NAME} --user=${USER_NAME} - -# create two backend replication controllers -kubectl config use-context dev -kubectl create -f examples/cluster-dns/dns-backend-rc.yaml -kubectl config use-context prod -kubectl create -f examples/cluster-dns/dns-backend-rc.yaml - -# create backend services -kubectl config use-context dev -kubectl create -f examples/cluster-dns/dns-backend-service.yaml -kubectl config use-context prod -kubectl create -f examples/cluster-dns/dns-backend-service.yaml - -# create a pod in each namespace and get its output -kubectl config use-context dev -kubectl create -f examples/cluster-dns/dns-frontend-pod.yaml -kubectl logs dns-frontend - -kubectl config use-context prod -kubectl create -f examples/cluster-dns/dns-frontend-pod.yaml -kubectl logs dns-frontend -``` - - - 
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/cluster-dns/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/cluster-dns/README.md](https://github.com/kubernetes/examples/blob/master/staging/cluster-dns/README.md) diff --git a/examples/cockroachdb/README.md b/examples/cockroachdb/README.md index 015939ffeac..23ffcbdce08 100644 --- a/examples/cockroachdb/README.md +++ b/examples/cockroachdb/README.md @@ -1,125 +1 @@ -# CockroachDB on Kubernetes as a StatefulSet - -This example deploys [CockroachDB](https://cockroachlabs.com) on Kubernetes as -a StatefulSet. CockroachDB is a distributed, scalable NewSQL database. Please see -[the homepage](https://cockroachlabs.com) and the -[documentation](https://www.cockroachlabs.com/docs/) for details. - -## Limitations - -### StatefulSet limitations - -Standard StatefulSet limitations apply: There is currently no possibility to use -node-local storage (outside of single-node tests), and so there is likely -a performance hit associated with running CockroachDB on some external storage. -Note that CockroachDB already does replication and thus it is unnecessary to -deploy it onto persistent volumes which already replicate internally. -For this reason, high-performance use cases on a private Kubernetes cluster -may want to consider a DaemonSet deployment until Stateful Sets support node-local -storage (see #7562). - -### Recovery after persistent storage failure - -A persistent storage failure (e.g. losing the hard drive) is gracefully handled -by CockroachDB as long as enough replicas survive (two out of three by -default). Due to the bootstrapping in this deployment, a storage failure of the -first node is special in that the administrator must manually prepopulate the -"new" storage medium by running an instance of CockroachDB with the `--join` -parameter. 
If this is not done, the first node will bootstrap a new cluster, -which will lead to a lot of trouble. - -### Dynamic volume provisioning - -The deployment is written for a use case in which dynamic volume provisioning is -available. When that is not the case, the persistent volume claims need -to be created manually. See [minikube.sh](minikube.sh) for the necessary -steps. If you're on GCE or AWS, where dynamic provisioning is supported, no -manual work is needed to create the persistent volumes. - -## Testing locally on minikube - -Follow the steps in [minikube.sh](minikube.sh) (or simply run that file). - -## Testing in the cloud on GCE or AWS - -Once you have a Kubernetes cluster running, just run -`kubectl create -f cockroachdb-statefulset.yaml` to create your cockroachdb cluster. -This works because GCE and AWS support dynamic volume provisioning by default, -so persistent volumes will be created for the CockroachDB pods as needed. - -## Accessing the database - -Along with our StatefulSet configuration, we expose a standard Kubernetes service -that offers a load-balanced virtual IP for clients to access the database -with. In our example, we've called this service `cockroachdb-public`. - -Start up a client pod and open up an interactive, (mostly) Postgres-flavor -SQL shell using: - -```console -$ kubectl run -it --rm cockroach-client --image=cockroachdb/cockroach --restart=Never --command -- ./cockroach sql --host cockroachdb-public --insecure -``` - -You can see example SQL statements for inserting and querying data in the -included [demo script](demo.sh), but can use almost any Postgres-style SQL -commands. Some more basic examples can be found within -[CockroachDB's documentation](https://www.cockroachlabs.com/docs/learn-cockroachdb-sql.html). 
- -## Accessing the admin UI - -If you want to see information about how the cluster is doing, you can try -pulling up the CockroachDB admin UI by port-forwarding from your local machine -to one of the pods: - -```shell -kubectl port-forward cockroachdb-0 8080 -``` - -Once you’ve done that, you should be able to access the admin UI by visiting -http://localhost:8080/ in your web browser. - -## Simulating failures - -When all (or enough) nodes are up, simulate a failure like this: - -```shell -kubectl exec cockroachdb-0 -- /bin/bash -c "while true; do kill 1; done" -``` - -You can then reconnect to the database as demonstrated above and verify -that no data was lost. The example runs with three-fold replication, so -it can tolerate one failure of any given node at a time. Note also that -there is a brief period of time immediately after the creation of the -cluster during which the three-fold replication is established, and during -which killing a node may lead to unavailability. - -The [demo script](demo.sh) gives an example of killing one instance of the -database and ensuring the other replicas have all data that was written. - -## Scaling up or down - -Scale the Stateful Set by running - -```shell -kubectl scale statefulset cockroachdb --replicas=4 -``` - -Note that you may need to create a new persistent volume claim first. If you -ran `minikube.sh`, there's a spare volume so you can immediately scale up by -one. If you're running on GCE or AWS, you can scale up by as many as you want -because new volumes will automatically be created for you. Convince yourself -that the new node immediately serves reads and writes. 
- -## Cleaning up when you're done - -Because all of the resources in this example have been tagged with the label `app=cockroachdb`, -we can clean up everything that we created in one quick command using a selector on that label: - -```shell -kubectl delete statefulsets,persistentvolumes,persistentvolumeclaims,services,poddisruptionbudget -l app=cockroachdb -``` - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/cockroachdb/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/cockroachdb/README.md](https://github.com/kubernetes/examples/blob/master/staging/cockroachdb/README.md) diff --git a/examples/cockroachdb/cockroachdb-statefulset.yaml b/examples/cockroachdb/cockroachdb-statefulset.yaml index 513c4a300f8..e5e6ae6c729 100644 --- a/examples/cockroachdb/cockroachdb-statefulset.yaml +++ b/examples/cockroachdb/cockroachdb-statefulset.yaml @@ -120,7 +120,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v1.0.1 + image: cockroachdb/cockroach:v1.0.3 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/examples/elasticsearch/README.md b/examples/elasticsearch/README.md index a50e29197f7..75ecfe69aee 100644 --- a/examples/elasticsearch/README.md +++ b/examples/elasticsearch/README.md @@ -1,163 +1 @@ -# Elasticsearch for Kubernetes - -Kubernetes makes it trivial for anyone to easily build and scale [Elasticsearch](http://www.elasticsearch.org/) clusters. Here, you'll find how to do so. -Current Elasticsearch version is `1.7.1`. - -[A more robust example that follows Elasticsearch best-practices of separating nodes concern is also available](production_cluster/README.md). - -WARNING Current pod descriptors use an `emptyDir` for storing data in each data node container. 
This is meant to be for the sake of simplicity and [should be adapted according to your storage needs](https://kubernetes.io/docs/design/persistent-storage.md). - -## Docker image - -The [pre-built image](https://github.com/pires/docker-elasticsearch-kubernetes) used in this example will not be supported. Feel free to fork to fit your own needs, but keep in mind that you will need to change Kubernetes descriptors accordingly. - -## Deploy - -Let's kickstart our cluster with 1 instance of Elasticsearch. - -``` -kubectl create -f examples/elasticsearch/service-account.yaml -kubectl create -f examples/elasticsearch/es-svc.yaml -kubectl create -f examples/elasticsearch/es-rc.yaml -``` - -Let's see if it worked: - -``` -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -es-kfymw 1/1 Running 0 7m -kube-dns-p3v1u 3/3 Running 0 19m -``` - -``` -$ kubectl logs es-kfymw -log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. -log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. -log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. -[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] version[1.7.1], pid[7], build[b88f43f/2015-07-29T09:54:16Z] -[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] initializing ... -[2015-08-30 10:01:32,110][INFO ][plugins ] [Hammerhead] loaded [cloud-kubernetes], sites [] -[2015-08-30 10:01:32,153][INFO ][env ] [Hammerhead] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4] -[2015-08-30 10:01:37,188][INFO ][node ] [Hammerhead] initialized -[2015-08-30 10:01:37,189][INFO ][node ] [Hammerhead] starting ... 
-[2015-08-30 10:01:37,499][INFO ][transport ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.48.2:9300]} -[2015-08-30 10:01:37,550][INFO ][discovery ] [Hammerhead] myesdb/n2-6uu_UT3W5XNrjyqBPiA -[2015-08-30 10:01:43,966][INFO ][cluster.service ] [Hammerhead] new_master [Hammerhead][n2-6uu_UT3W5XNrjyqBPiA][es-kfymw][inet[/10.244.48.2:9300]]{master=true}, reason: zen-disco-join (elected_as_master) -[2015-08-30 10:01:44,010][INFO ][http ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.244.48.2:9200]} -[2015-08-30 10:01:44,011][INFO ][node ] [Hammerhead] started -[2015-08-30 10:01:44,042][INFO ][gateway ] [Hammerhead] recovered [0] indices into cluster_state -``` - -So we have a 1-node Elasticsearch cluster ready to handle some work. - -## Scale - -Scaling is as easy as: - -``` -kubectl scale --replicas=3 rc es -``` - -Did it work? - -``` -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -es-78e0s 1/1 Running 0 8m -es-kfymw 1/1 Running 0 17m -es-rjmer 1/1 Running 0 8m -kube-dns-p3v1u 3/3 Running 0 30m -``` - -Let's take a look at logs: - -``` -$ kubectl logs es-kfymw -log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. -log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. -log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. -[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] version[1.7.1], pid[7], build[b88f43f/2015-07-29T09:54:16Z] -[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] initializing ... 
-[2015-08-30 10:01:32,110][INFO ][plugins ] [Hammerhead] loaded [cloud-kubernetes], sites [] -[2015-08-30 10:01:32,153][INFO ][env ] [Hammerhead] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4] -[2015-08-30 10:01:37,188][INFO ][node ] [Hammerhead] initialized -[2015-08-30 10:01:37,189][INFO ][node ] [Hammerhead] starting ... -[2015-08-30 10:01:37,499][INFO ][transport ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.48.2:9300]} -[2015-08-30 10:01:37,550][INFO ][discovery ] [Hammerhead] myesdb/n2-6uu_UT3W5XNrjyqBPiA -[2015-08-30 10:01:43,966][INFO ][cluster.service ] [Hammerhead] new_master [Hammerhead][n2-6uu_UT3W5XNrjyqBPiA][es-kfymw][inet[/10.244.48.2:9300]]{master=true}, reason: zen-disco-join (elected_as_master) -[2015-08-30 10:01:44,010][INFO ][http ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.244.48.2:9200]} -[2015-08-30 10:01:44,011][INFO ][node ] [Hammerhead] started -[2015-08-30 10:01:44,042][INFO ][gateway ] [Hammerhead] recovered [0] indices into cluster_state -[2015-08-30 10:08:02,517][INFO ][cluster.service ] [Hammerhead] added {[Tenpin][2gv5MiwhRiOSsrTOF3DhuA][es-78e0s][inet[/10.244.54.4:9300]]{master=true},}, reason: zen-disco-receive(join from node[[Tenpin][2gv5MiwhRiOSsrTOF3DhuA][es-78e0s][inet[/10.244.54.4:9300]]{master=true}]) -[2015-08-30 10:10:10,645][INFO ][cluster.service ] [Hammerhead] added {[Evilhawk][ziTq2PzYRJys43rNL2tbyg][es-rjmer][inet[/10.244.33.3:9300]]{master=true},}, reason: zen-disco-receive(join from node[[Evilhawk][ziTq2PzYRJys43rNL2tbyg][es-rjmer][inet[/10.244.33.3:9300]]{master=true}]) -``` - -So we have a 3-node Elasticsearch cluster ready to handle more work. - -## Access the service - -*Don't forget* that services in Kubernetes are only accessible from containers in the cluster. 
For different behavior you should [configure the creation of an external load-balancer](http://kubernetes.io/v1.0/docs/user-guide/services.html#type-loadbalancer). While it's supported within this example service descriptor, its usage is out of scope of this document, for now. - -``` -$ kubectl get service elasticsearch -NAME LABELS SELECTOR IP(S) PORT(S) -elasticsearch component=elasticsearch component=elasticsearch 10.100.108.94 9200/TCP - 9300/TCP -``` - -From any host on your cluster (that's running `kube-proxy`), run: - -``` -$ curl 10.100.108.94:9200 -``` - -You should see something similar to the following: - - -```json -{ - "status" : 200, - "name" : "Hammerhead", - "cluster_name" : "myesdb", - "version" : { - "number" : "1.7.1", - "build_hash" : "b88f43fc40b0bcd7f173a1f9ee2e97816de80b19", - "build_timestamp" : "2015-07-29T09:54:16Z", - "build_snapshot" : false, - "lucene_version" : "4.10.4" - }, - "tagline" : "You Know, for Search" -} -``` - -Or if you want to check cluster information: - - -``` -curl 10.100.108.94:9200/_cluster/health?pretty -``` - -You should see something similar to the following: - -```json -{ - "cluster_name" : "myesdb", - "status" : "green", - "timed_out" : false, - "number_of_nodes" : 3, - "number_of_data_nodes" : 3, - "active_primary_shards" : 0, - "active_shards" : 0, - "relocating_shards" : 0, - "initializing_shards" : 0, - "unassigned_shards" : 0, - "delayed_unassigned_shards" : 0, - "number_of_pending_tasks" : 0, - "number_of_in_flight_fetch" : 0 -} -``` - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/elasticsearch/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/elasticsearch/README.md](https://github.com/kubernetes/examples/blob/master/staging/elasticsearch/README.md) diff --git a/examples/elasticsearch/production_cluster/README.md b/examples/elasticsearch/production_cluster/README.md index efe002cbbb9..e1da3b72677 100644 --- 
a/examples/elasticsearch/production_cluster/README.md +++ b/examples/elasticsearch/production_cluster/README.md @@ -1,189 +1 @@ -# Elasticsearch for Kubernetes - -Kubernetes makes it trivial for anyone to easily build and scale [Elasticsearch](http://www.elasticsearch.org/) clusters. Here, you'll find how to do so. -Current Elasticsearch version is `1.7.1`. - -Before we start, one needs to know that Elasticsearch best-practices recommend to separate nodes in three roles: -* `Master` nodes - intended for clustering management only, no data, no HTTP API -* `Client` nodes - intended for client usage, no data, with HTTP API -* `Data` nodes - intended for storing and indexing your data, no HTTP API - -This is enforced throughout this document. - -WARNING Current pod descriptors use an `emptyDir` for storing data in each data node container. This is meant to be for the sake of simplicity and [should be adapted according to your storage needs](https://kubernetes.io/docs/design/persistent-storage.md). - -## Docker image - -This example uses [this pre-built image](https://github.com/pires/docker-elasticsearch-kubernetes). Feel free to fork and update it to fit your own needs, but keep in mind that you will need to change Kubernetes descriptors accordingly. - -## Deploy - -``` -kubectl create -f examples/elasticsearch/production_cluster/service-account.yaml -kubectl create -f examples/elasticsearch/production_cluster/es-discovery-svc.yaml -kubectl create -f examples/elasticsearch/production_cluster/es-svc.yaml -kubectl create -f examples/elasticsearch/production_cluster/es-master-rc.yaml -``` - -Wait until `es-master` is provisioned, and - -``` -kubectl create -f examples/elasticsearch/production_cluster/es-client-rc.yaml -``` - -Wait until `es-client` is provisioned, and - -``` -kubectl create -f examples/elasticsearch/production_cluster/es-data-rc.yaml -``` - -Wait until `es-data` is provisioned. 
- -Now, I leave up to you how to validate the cluster, but a first step is to wait for containers to be in ```RUNNING``` state and check the Elasticsearch master logs: - -``` -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -es-client-2ep9o 1/1 Running 0 2m -es-data-r9tgv 1/1 Running 0 1m -es-master-vxl6c 1/1 Running 0 6m -``` - -``` -$ kubectl logs es-master-vxl6c -log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. -log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. -log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. -[2015-08-21 10:58:51,324][INFO ][node ] [Arc] version[1.7.1], pid[8], build[b88f43f/2015-07-29T09:54:16Z] -[2015-08-21 10:58:51,328][INFO ][node ] [Arc] initializing ... -[2015-08-21 10:58:51,542][INFO ][plugins ] [Arc] loaded [cloud-kubernetes], sites [] -[2015-08-21 10:58:51,624][INFO ][env ] [Arc] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4] -[2015-08-21 10:58:57,439][INFO ][node ] [Arc] initialized -[2015-08-21 10:58:57,439][INFO ][node ] [Arc] starting ... 
-[2015-08-21 10:58:57,782][INFO ][transport ] [Arc] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.15.2:9300]} -[2015-08-21 10:58:57,847][INFO ][discovery ] [Arc] myesdb/-x16XFUzTCC8xYqWoeEOYQ -[2015-08-21 10:59:05,167][INFO ][cluster.service ] [Arc] new_master [Arc][-x16XFUzTCC8xYqWoeEOYQ][es-master-vxl6c][inet[/10.244.15.2:9300]]{data=false, master=true}, reason: zen-disco-join (elected_as_master) -[2015-08-21 10:59:05,202][INFO ][node ] [Arc] started -[2015-08-21 10:59:05,238][INFO ][gateway ] [Arc] recovered [0] indices into cluster_state -[2015-08-21 11:02:28,797][INFO ][cluster.service ] [Arc] added {[Gideon][4EfhWSqaTqikbK4tI7bODA][es-data-r9tgv][inet[/10.244.59.4:9300]]{master=false},}, reason: zen-disco-receive(join from node[[Gideon][4EfhWSqaTqikbK4tI7bODA][es-data-r9tgv][inet[/10.244.59.4:9300]]{master=false}]) -[2015-08-21 11:03:16,822][INFO ][cluster.service ] [Arc] added {[Venomm][tFYxwgqGSpOejHLG4umRqg][es-client-2ep9o][inet[/10.244.53.2:9300]]{data=false, master=false},}, reason: zen-disco-receive(join from node[[Venomm][tFYxwgqGSpOejHLG4umRqg][es-client-2ep9o][inet[/10.244.53.2:9300]]{data=false, master=false}]) -``` - -As you can assert, the cluster is up and running. Easy, wasn't it? - -## Scale - -Scaling each type of node to handle your cluster is as easy as: - -``` -kubectl scale --replicas=3 rc es-master -kubectl scale --replicas=2 rc es-client -kubectl scale --replicas=2 rc es-data -``` - -Did it work? - -``` -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -es-client-2ep9o 1/1 Running 0 4m -es-client-ye5s1 1/1 Running 0 50s -es-data-8az22 1/1 Running 0 47s -es-data-r9tgv 1/1 Running 0 3m -es-master-57h7k 1/1 Running 0 52s -es-master-kuwse 1/1 Running 0 52s -es-master-vxl6c 1/1 Running 0 8m -``` - -Let's take another look of the Elasticsearch master logs: - -``` -$ kubectl logs es-master-vxl6c -log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. 
-log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. -log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. -[2015-08-21 10:58:51,324][INFO ][node ] [Arc] version[1.7.1], pid[8], build[b88f43f/2015-07-29T09:54:16Z] -[2015-08-21 10:58:51,328][INFO ][node ] [Arc] initializing ... -[2015-08-21 10:58:51,542][INFO ][plugins ] [Arc] loaded [cloud-kubernetes], sites [] -[2015-08-21 10:58:51,624][INFO ][env ] [Arc] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4] -[2015-08-21 10:58:57,439][INFO ][node ] [Arc] initialized -[2015-08-21 10:58:57,439][INFO ][node ] [Arc] starting ... -[2015-08-21 10:58:57,782][INFO ][transport ] [Arc] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.15.2:9300]} -[2015-08-21 10:58:57,847][INFO ][discovery ] [Arc] myesdb/-x16XFUzTCC8xYqWoeEOYQ -[2015-08-21 10:59:05,167][INFO ][cluster.service ] [Arc] new_master [Arc][-x16XFUzTCC8xYqWoeEOYQ][es-master-vxl6c][inet[/10.244.15.2:9300]]{data=false, master=true}, reason: zen-disco-join (elected_as_master) -[2015-08-21 10:59:05,202][INFO ][node ] [Arc] started -[2015-08-21 10:59:05,238][INFO ][gateway ] [Arc] recovered [0] indices into cluster_state -[2015-08-21 11:02:28,797][INFO ][cluster.service ] [Arc] added {[Gideon][4EfhWSqaTqikbK4tI7bODA][es-data-r9tgv][inet[/10.244.59.4:9300]]{master=false},}, reason: zen-disco-receive(join from node[[Gideon][4EfhWSqaTqikbK4tI7bODA][es-data-r9tgv][inet[/10.244.59.4:9300]]{master=false}]) -[2015-08-21 11:03:16,822][INFO ][cluster.service ] [Arc] added {[Venomm][tFYxwgqGSpOejHLG4umRqg][es-client-2ep9o][inet[/10.244.53.2:9300]]{data=false, master=false},}, reason: zen-disco-receive(join from node[[Venomm][tFYxwgqGSpOejHLG4umRqg][es-client-2ep9o][inet[/10.244.53.2:9300]]{data=false, master=false}]) -[2015-08-21 11:04:40,781][INFO ][cluster.service ] [Arc] added {[Erik 
Josten][QUJlahfLTi-MsxzM6_Da0g][es-master-kuwse][inet[/10.244.59.5:9300]]{data=false, master=true},}, reason: zen-disco-receive(join from node[[Erik Josten][QUJlahfLTi-MsxzM6_Da0g][es-master-kuwse][inet[/10.244.59.5:9300]]{data=false, master=true}]) -[2015-08-21 11:04:41,076][INFO ][cluster.service ] [Arc] added {[Power Princess][V4qnR-6jQOS5ovXQsPgo7g][es-master-57h7k][inet[/10.244.53.3:9300]]{data=false, master=true},}, reason: zen-disco-receive(join from node[[Power Princess][V4qnR-6jQOS5ovXQsPgo7g][es-master-57h7k][inet[/10.244.53.3:9300]]{data=false, master=true}]) -[2015-08-21 11:04:53,966][INFO ][cluster.service ] [Arc] added {[Cagliostro][Wpfx5fkBRiG2qCEWd8laaQ][es-client-ye5s1][inet[/10.244.15.3:9300]]{data=false, master=false},}, reason: zen-disco-receive(join from node[[Cagliostro][Wpfx5fkBRiG2qCEWd8laaQ][es-client-ye5s1][inet[/10.244.15.3:9300]]{data=false, master=false}]) -[2015-08-21 11:04:56,803][INFO ][cluster.service ] [Arc] added {[Thog][vkdEtX3ESfWmhXXf-Wi0_Q][es-data-8az22][inet[/10.244.15.4:9300]]{master=false},}, reason: zen-disco-receive(join from node[[Thog][vkdEtX3ESfWmhXXf-Wi0_Q][es-data-8az22][inet[/10.244.15.4:9300]]{master=false}]) -``` - -## Access the service - -*Don't forget* that services in Kubernetes are only accessible from containers in the cluster. For different behavior you should [configure the creation of an external load-balancer](http://kubernetes.io/v1.0/docs/user-guide/services.html#type-loadbalancer). While it's supported within this example service descriptor, its usage is out of scope of this document, for now. 
- -``` -$ kubectl get service elasticsearch -NAME LABELS SELECTOR IP(S) PORT(S) -elasticsearch component=elasticsearch,role=client component=elasticsearch,role=client 10.100.134.2 9200/TCP -``` - -From any host on your cluster (that's running `kube-proxy`), run: - -``` -curl http://10.100.134.2:9200 -``` - -You should see something similar to the following: - - -```json -{ - "status" : 200, - "name" : "Cagliostro", - "cluster_name" : "myesdb", - "version" : { - "number" : "1.7.1", - "build_hash" : "b88f43fc40b0bcd7f173a1f9ee2e97816de80b19", - "build_timestamp" : "2015-07-29T09:54:16Z", - "build_snapshot" : false, - "lucene_version" : "4.10.4" - }, - "tagline" : "You Know, for Search" -} -``` - -Or if you want to check cluster information: - - -``` -curl http://10.100.134.2:9200/_cluster/health?pretty -``` - -You should see something similar to the following: - -```json -{ - "cluster_name" : "myesdb", - "status" : "green", - "timed_out" : false, - "number_of_nodes" : 7, - "number_of_data_nodes" : 2, - "active_primary_shards" : 0, - "active_shards" : 0, - "relocating_shards" : 0, - "initializing_shards" : 0, - "unassigned_shards" : 0, - "delayed_unassigned_shards" : 0, - "number_of_pending_tasks" : 0, - "number_of_in_flight_fetch" : 0 -} -``` - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/elasticsearch/production_cluster/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/elasticsearch/production_cluster/README.md](https://github.com/kubernetes/examples/blob/master/staging/elasticsearch/production_cluster/README.md) diff --git a/examples/explorer/README.md b/examples/explorer/README.md index 8fb1fac951a..5451e2b30c3 100644 --- a/examples/explorer/README.md +++ b/examples/explorer/README.md @@ -1,133 +1 @@ -### explorer - -Explorer is a little container for examining the runtime environment Kubernetes produces for your pods. 
- -The intended use is to substitute gcr.io/google_containers/explorer for your intended container, and then visit it via the proxy. - -Currently, you can look at: - * The environment variables to make sure Kubernetes is doing what you expect. - * The filesystem to make sure the mounted volumes and files are also what you expect. - * Perform DNS lookups, to see how DNS works. - -`pod.yaml` is supplied as an example. You can control the port it serves on with the -port flag. - -Example from command line (the DNS lookup looks better from a web browser): - -```console -$ kubectl create -f examples/explorer/pod.yaml -$ kubectl proxy & -Starting to serve on localhost:8001 - -$ curl localhost:8001/api/v1/proxy/namespaces/default/pods/explorer:8080/vars/ -PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -HOSTNAME=explorer -KIBANA_LOGGING_PORT_5601_TCP_PORT=5601 -KUBERNETES_SERVICE_HOST=10.0.0.2 -MONITORING_GRAFANA_PORT_80_TCP_PROTO=tcp -MONITORING_INFLUXDB_UI_PORT_80_TCP_PROTO=tcp -KIBANA_LOGGING_SERVICE_PORT=5601 -MONITORING_HEAPSTER_PORT_80_TCP_PORT=80 -MONITORING_INFLUXDB_UI_PORT_80_TCP_PORT=80 -KIBANA_LOGGING_SERVICE_HOST=10.0.204.206 -KIBANA_LOGGING_PORT_5601_TCP=tcp://10.0.204.206:5601 -KUBERNETES_PORT=tcp://10.0.0.2:443 -MONITORING_INFLUXDB_PORT=tcp://10.0.2.30:80 -MONITORING_INFLUXDB_PORT_80_TCP_PROTO=tcp -MONITORING_INFLUXDB_UI_PORT=tcp://10.0.36.78:80 -KUBE_DNS_PORT_53_UDP=udp://10.0.0.10:53 -MONITORING_INFLUXDB_SERVICE_HOST=10.0.2.30 -ELASTICSEARCH_LOGGING_PORT=tcp://10.0.48.200:9200 -ELASTICSEARCH_LOGGING_PORT_9200_TCP_PORT=9200 -KUBERNETES_PORT_443_TCP=tcp://10.0.0.2:443 -ELASTICSEARCH_LOGGING_PORT_9200_TCP_PROTO=tcp -KIBANA_LOGGING_PORT_5601_TCP_ADDR=10.0.204.206 -KUBE_DNS_PORT_53_UDP_ADDR=10.0.0.10 -MONITORING_HEAPSTER_PORT_80_TCP_PROTO=tcp -MONITORING_INFLUXDB_PORT_80_TCP_ADDR=10.0.2.30 -KIBANA_LOGGING_PORT=tcp://10.0.204.206:5601 -MONITORING_GRAFANA_SERVICE_PORT=80 -MONITORING_HEAPSTER_SERVICE_PORT=80 
-MONITORING_HEAPSTER_PORT_80_TCP=tcp://10.0.150.238:80 -ELASTICSEARCH_LOGGING_PORT_9200_TCP=tcp://10.0.48.200:9200 -ELASTICSEARCH_LOGGING_PORT_9200_TCP_ADDR=10.0.48.200 -MONITORING_GRAFANA_PORT_80_TCP_PORT=80 -MONITORING_HEAPSTER_PORT=tcp://10.0.150.238:80 -MONITORING_INFLUXDB_PORT_80_TCP=tcp://10.0.2.30:80 -KUBE_DNS_SERVICE_PORT=53 -KUBE_DNS_PORT_53_UDP_PORT=53 -MONITORING_GRAFANA_PORT_80_TCP_ADDR=10.0.100.174 -MONITORING_INFLUXDB_UI_SERVICE_HOST=10.0.36.78 -KIBANA_LOGGING_PORT_5601_TCP_PROTO=tcp -MONITORING_GRAFANA_PORT=tcp://10.0.100.174:80 -MONITORING_INFLUXDB_UI_PORT_80_TCP_ADDR=10.0.36.78 -KUBE_DNS_SERVICE_HOST=10.0.0.10 -KUBERNETES_PORT_443_TCP_PORT=443 -MONITORING_HEAPSTER_PORT_80_TCP_ADDR=10.0.150.238 -MONITORING_INFLUXDB_UI_SERVICE_PORT=80 -KUBE_DNS_PORT=udp://10.0.0.10:53 -ELASTICSEARCH_LOGGING_SERVICE_HOST=10.0.48.200 -KUBERNETES_SERVICE_PORT=443 -MONITORING_HEAPSTER_SERVICE_HOST=10.0.150.238 -MONITORING_INFLUXDB_SERVICE_PORT=80 -MONITORING_INFLUXDB_PORT_80_TCP_PORT=80 -KUBE_DNS_PORT_53_UDP_PROTO=udp -MONITORING_GRAFANA_PORT_80_TCP=tcp://10.0.100.174:80 -ELASTICSEARCH_LOGGING_SERVICE_PORT=9200 -MONITORING_GRAFANA_SERVICE_HOST=10.0.100.174 -MONITORING_INFLUXDB_UI_PORT_80_TCP=tcp://10.0.36.78:80 -KUBERNETES_PORT_443_TCP_PROTO=tcp -KUBERNETES_PORT_443_TCP_ADDR=10.0.0.2 -HOME=/ - -$ curl localhost:8001/api/v1/proxy/namespaces/default/pods/explorer:8080/fs/ -mount/ -var/ -.dockerenv -etc/ -dev/ -proc/ -.dockerinit -sys/ -README.md -explorer - -$ curl localhost:8001/api/v1/proxy/namespaces/default/pods/explorer:8080/dns?q=elasticsearch-logging - -
      - - -
      -

      LookupNS(elasticsearch-logging):
      -Result: ([]*net.NS)
      -Error: <*>lookup elasticsearch-logging: no such host
      -
      -LookupTXT(elasticsearch-logging):
      -Result: ([]string)
      -Error: <*>lookup elasticsearch-logging: no such host
      -
      -LookupSRV("", "", elasticsearch-logging):
      -cname: elasticsearch-logging.default.svc.cluster.local.
      -Result: ([]*net.SRV)[<*>{Target:(string)elasticsearch-logging.default.svc.cluster.local. Port:(uint16)9200 Priority:(uint16)10 Weight:(uint16)100}]
      -Error: 
      -
      -LookupHost(elasticsearch-logging):
      -Result: ([]string)[10.0.60.245]
      -Error: 
      -
      -LookupIP(elasticsearch-logging):
      -Result: ([]net.IP)[10.0.60.245]
      -Error: 
      -
      -LookupMX(elasticsearch-logging):
      -Result: ([]*net.MX)
      -Error: <*>lookup elasticsearch-logging: no such host
      -
      -
      - - -``` - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/explorer/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/explorer/README.md](https://github.com/kubernetes/examples/blob/master/staging/explorer/README.md) diff --git a/examples/guestbook-go/README.md b/examples/guestbook-go/README.md index a168ad91b31..5125039317e 100644 --- a/examples/guestbook-go/README.md +++ b/examples/guestbook-go/README.md @@ -1,271 +1 @@ -## Guestbook Example - -This example shows how to build a simple multi-tier web application using Kubernetes and Docker. The application consists of a web front-end, Redis master for storage, and replicated set of Redis slaves, all for which we will create Kubernetes replication controllers, pods, and services. - -If you are running a cluster in Google Container Engine (GKE), instead see the [Guestbook Example for Google Container Engine](https://cloud.google.com/container-engine/docs/tutorials/guestbook). - -##### Table of Contents - - * [Step Zero: Prerequisites](#step-zero) - * [Step One: Create the Redis master pod](#step-one) - * [Step Two: Create the Redis master service](#step-two) - * [Step Three: Create the Redis slave pods](#step-three) - * [Step Four: Create the Redis slave service](#step-four) - * [Step Five: Create the guestbook pods](#step-five) - * [Step Six: Create the guestbook service](#step-six) - * [Step Seven: View the guestbook](#step-seven) - * [Step Eight: Cleanup](#step-eight) - -### Step Zero: Prerequisites - -This example assumes that you have a working cluster. See the [Getting Started Guides](https://kubernetes.io/docs/getting-started-guides/) for details about creating a cluster. - -**Tip:** View all the `kubectl` commands, including their options and descriptions in the [kubectl CLI reference](https://kubernetes.io/docs/user-guide/kubectl/kubectl.md). 
- -### Step One: Create the Redis master pod - -Use the `examples/guestbook-go/redis-master-controller.json` file to create a [replication controller](https://kubernetes.io/docs/user-guide/replication-controller.md) and Redis master [pod](https://kubernetes.io/docs/user-guide/pods.md). The pod runs a Redis key-value server in a container. Using a replication controller is the preferred way to launch long-running pods, even for 1 replica, so that the pod benefits from the self-healing mechanism in Kubernetes (keeps the pods alive). - -1. Use the [redis-master-controller.json](redis-master-controller.json) file to create the Redis master replication controller in your Kubernetes cluster by running the `kubectl create -f` *`filename`* command: - - ```console - $ kubectl create -f examples/guestbook-go/redis-master-controller.json - replicationcontrollers/redis-master - ``` - -2. To verify that the redis-master controller is up, list the replication controllers you created in the cluster with the `kubectl get rc` command(if you don't specify a `--namespace`, the `default` namespace will be used. The same below): - - ```console - $ kubectl get rc - CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS - redis-master redis-master gurpartap/redis app=redis,role=master 1 - ... - ``` - - Result: The replication controller then creates the single Redis master pod. - -3. To verify that the redis-master pod is running, list the pods you created in cluster with the `kubectl get pods` command: - - ```console - $ kubectl get pods - NAME READY STATUS RESTARTS AGE - redis-master-xx4uv 1/1 Running 0 1m - ... - ``` - - Result: You'll see a single Redis master pod and the machine where the pod is running after the pod gets placed (may take up to thirty seconds). - -4. 
To verify what containers are running in the redis-master pod, you can SSH to that machine with `gcloud compute ssh --zone` *`zone_name`* *`host_name`* and then run `docker ps`: - - ```console - me@workstation$ gcloud compute ssh --zone us-central1-b kubernetes-node-bz1p - - me@kubernetes-node-3:~$ sudo docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS - d5c458dabe50 redis "/entrypoint.sh redis" 5 minutes ago Up 5 minutes - ``` - - Note: The initial `docker pull` can take a few minutes, depending on network conditions. - -### Step Two: Create the Redis master service - -A Kubernetes [service](https://kubernetes.io/docs/user-guide/services.md) is a named load balancer that proxies traffic to one or more pods. The services in a Kubernetes cluster are discoverable inside other pods via environment variables or DNS. - -Services find the pods to load balance based on pod labels. The pod that you created in Step One has the label `app=redis` and `role=master`. The selector field of the service determines which pods will receive the traffic sent to the service. - -1. Use the [redis-master-service.json](redis-master-service.json) file to create the service in your Kubernetes cluster by running the `kubectl create -f` *`filename`* command: - - ```console - $ kubectl create -f examples/guestbook-go/redis-master-service.json - services/redis-master - ``` - -2. To verify that the redis-master service is up, list the services you created in the cluster with the `kubectl get services` command: - - ```console - $ kubectl get services - NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE - redis-master 10.0.136.3 6379/TCP app=redis,role=master 1h - ... - ``` - - Result: All new pods will see the `redis-master` service running on the host (`$REDIS_MASTER_SERVICE_HOST` environment variable) at port 6379, or running on `redis-master:6379`. 
After the service is created, the service proxy on each node is configured to set up a proxy on the specified port (in our example, that's port 6379). - - -### Step Three: Create the Redis slave pods - -The Redis master we created earlier is a single pod (REPLICAS = 1), while the Redis read slaves we are creating here are 'replicated' pods. In Kubernetes, a replication controller is responsible for managing the multiple instances of a replicated pod. - -1. Use the file [redis-slave-controller.json](redis-slave-controller.json) to create the replication controller by running the `kubectl create -f` *`filename`* command: - - ```console - $ kubectl create -f examples/guestbook-go/redis-slave-controller.json - replicationcontrollers/redis-slave - ``` - -2. To verify that the redis-slave controller is running, run the `kubectl get rc` command: - - ```console - $ kubectl get rc - CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS - redis-master redis-master redis app=redis,role=master 1 - redis-slave redis-slave kubernetes/redis-slave:v2 app=redis,role=slave 2 - ... - ``` - - Result: The replication controller creates and configures the Redis slave pods through the redis-master service (name:port pair, in our example that's `redis-master:6379`). - - Example: - The Redis slaves get started by the replication controller with the following command: - - ```console - redis-server --slaveof redis-master 6379 - ``` - -3. To verify that the Redis master and slaves pods are running, run the `kubectl get pods` command: - - ```console - $ kubectl get pods - NAME READY STATUS RESTARTS AGE - redis-master-xx4uv 1/1 Running 0 18m - redis-slave-b6wj4 1/1 Running 0 1m - redis-slave-iai40 1/1 Running 0 1m - ... - ``` - - Result: You see the single Redis master and two Redis slave pods. - -### Step Four: Create the Redis slave service - -Just like the master, we want to have a service to proxy connections to the read slaves. 
In this case, in addition to discovery, the Redis slave service provides transparent load balancing to clients. - -1. Use the [redis-slave-service.json](redis-slave-service.json) file to create the Redis slave service by running the `kubectl create -f` *`filename`* command: - - ```console - $ kubectl create -f examples/guestbook-go/redis-slave-service.json - services/redis-slave - ``` - -2. To verify that the redis-slave service is up, list the services you created in the cluster with the `kubectl get services` command: - - ```console - $ kubectl get services - NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE - redis-master 10.0.136.3 6379/TCP app=redis,role=master 1h - redis-slave 10.0.21.92 6379/TCP app-redis,role=slave 1h - ... - ``` - - Result: The service is created with labels `app=redis` and `role=slave` to identify that the pods are running the Redis slaves. - -Tip: It is helpful to set labels on your services themselves--as we've done here--to make it easy to locate them later. - -### Step Five: Create the guestbook pods - -This is a simple Go `net/http` ([negroni](https://github.com/codegangsta/negroni) based) server that is configured to talk to either the slave or master services depending on whether the request is a read or a write. The pods we are creating expose a simple JSON interface and serves a jQuery-Ajax based UI. Like the Redis read slaves, these pods are also managed by a replication controller. - -1. Use the [guestbook-controller.json](guestbook-controller.json) file to create the guestbook replication controller by running the `kubectl create -f` *`filename`* command: - - ```console - $ kubectl create -f examples/guestbook-go/guestbook-controller.json - replicationcontrollers/guestbook - ``` - - Tip: If you want to modify the guestbook code open the `_src` of this example and read the README.md and the Makefile. If you have pushed your custom image be sure to update the `image` accordingly in the guestbook-controller.json. - -2. 
To verify that the guestbook replication controller is running, run the `kubectl get rc` command: - - ```console - $ kubectl get rc - CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS - guestbook guestbook gcr.io/google_containers/guestbook:v3 app=guestbook 3 - redis-master redis-master redis app=redis,role=master 1 - redis-slave redis-slave kubernetes/redis-slave:v2 app=redis,role=slave 2 - ... - ``` - -3. To verify that the guestbook pods are running (it might take up to thirty seconds to create the pods), list the pods you created in cluster with the `kubectl get pods` command: - - ```console - $ kubectl get pods - NAME READY STATUS RESTARTS AGE - guestbook-3crgn 1/1 Running 0 2m - guestbook-gv7i6 1/1 Running 0 2m - guestbook-x405a 1/1 Running 0 2m - redis-master-xx4uv 1/1 Running 0 23m - redis-slave-b6wj4 1/1 Running 0 6m - redis-slave-iai40 1/1 Running 0 6m - ... - ``` - - Result: You see a single Redis master, two Redis slaves, and three guestbook pods. - -### Step Six: Create the guestbook service - -Just like the others, we create a service to group the guestbook pods but this time, to make the guestbook front-end externally visible, we specify `"type": "LoadBalancer"`. - -1. Use the [guestbook-service.json](guestbook-service.json) file to create the guestbook service by running the `kubectl create -f` *`filename`* command: - - ```console - $ kubectl create -f examples/guestbook-go/guestbook-service.json - ``` - - -2. To verify that the guestbook service is up, list the services you created in the cluster with the `kubectl get services` command: - - ```console - $ kubectl get services - NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE - guestbook 10.0.217.218 146.148.81.8 3000/TCP app=guestbook 1h - redis-master 10.0.136.3 6379/TCP app=redis,role=master 1h - redis-slave 10.0.21.92 6379/TCP app-redis,role=slave 1h - ... - ``` - - Result: The service is created with label `app=guestbook`. 
- -### Step Seven: View the guestbook - -You can now play with the guestbook that you just created by opening it in a browser (it might take a few moments for the guestbook to come up). - - * **Local Host:** - If you are running Kubernetes locally, to view the guestbook, navigate to `http://localhost:3000` in your browser. - - * **Remote Host:** - 1. To view the guestbook on a remote host, locate the external IP of the load balancer in the **IP** column of the `kubectl get services` output. In our example, the internal IP address is `10.0.217.218` and the external IP address is `146.148.81.8` (*Note: you might need to scroll to see the IP column*). - - 2. Append port `3000` to the IP address (for example `http://146.148.81.8:3000`), and then navigate to that address in your browser. - - Result: The guestbook displays in your browser: - - ![Guestbook](guestbook-page.png) - - **Further Reading:** - If you're using Google Compute Engine, see the details about limiting traffic to specific sources at [Google Compute Engine firewall documentation][gce-firewall-docs]. - -[cloud-console]: https://console.developer.google.com -[gce-firewall-docs]: https://cloud.google.com/compute/docs/networking#firewalls - -### Step Eight: Cleanup - -After you're done playing with the guestbook, you can cleanup by deleting the guestbook service and removing the associated resources that were created, including load balancers, forwarding rules, target pools, and Kubernetes replication controllers and services. 
- -Delete all the resources by running the following `kubectl delete -f` *`filename`* command: - -```console -$ kubectl delete -f examples/guestbook-go -guestbook-controller -guestbook -redid-master-controller -redis-master -redis-slave-controller -redis-slave -``` - -Tip: To turn down your Kubernetes cluster, follow the corresponding instructions in the version of the -[Getting Started Guides](https://kubernetes.io/docs/getting-started-guides/) that you previously used to create your cluster. - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/guestbook-go/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/guestbook-go/README.md](https://github.com/kubernetes/examples/blob/master/guestbook-go/README.md) diff --git a/examples/guestbook/README.md b/examples/guestbook/README.md index 42d14f6e2f4..baee2f834c0 100644 --- a/examples/guestbook/README.md +++ b/examples/guestbook/README.md @@ -1,702 +1 @@ - -## Guestbook Example - -This example shows how to build a simple, multi-tier web application using Kubernetes and [Docker](https://www.docker.com/). 
- -**Table of Contents** - - - - [Guestbook Example](#guestbook-example) - - [Prerequisites](#prerequisites) - - [Quick Start](#quick-start) - - [Step One: Start up the redis master](#step-one-start-up-the-redis-master) - - [Define a Deployment](#define-a-deployment) - - [Define a Service](#define-a-service) - - [Create a Service](#create-a-service) - - [Finding a Service](#finding-a-service) - - [Environment variables](#environment-variables) - - [DNS service](#dns-service) - - [Create a Deployment](#create-a-deployment) - - [Optional Interlude](#optional-interlude) - - [Step Two: Start up the redis slave](#step-two-start-up-the-redis-slave) - - [Step Three: Start up the guestbook frontend](#step-three-start-up-the-guestbook-frontend) - - [Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific)](#using-type-loadbalancer-for-the-frontend-service-cloud-provider-specific) - - [Step Four: Cleanup](#step-four-cleanup) - - [Troubleshooting](#troubleshooting) - - [Appendix: Accessing the guestbook site externally](#appendix-accessing-the-guestbook-site-externally) - - [Google Compute Engine External Load Balancer Specifics](#google-compute-engine-external-load-balancer-specifics) - - - -The example consists of: - -- A web frontend -- A [redis](http://redis.io/) master (for storage), and a replicated set of redis 'slaves'. - -The web frontend interacts with the redis master via javascript redis API calls. - -**Note**: If you are running this example on a [Google Container Engine](https://cloud.google.com/container-engine/) installation, see [this Google Container Engine guestbook walkthrough](https://cloud.google.com/container-engine/docs/tutorials/guestbook) instead. The basic concepts are the same, but the walkthrough is tailored to a Container Engine setup. - -### Prerequisites - -This example requires a running Kubernetes cluster. 
First, check that kubectl is properly configured by getting the cluster state: - -```console -$ kubectl cluster-info -``` - -If you see a url response, you are ready to go. If not, read the [Getting Started guides](http://kubernetes.io/docs/getting-started-guides/) for how to get started, and follow the [prerequisites](http://kubernetes.io/docs/user-guide/prereqs/) to install and configure `kubectl`. As noted above, if you have a Google Container Engine cluster set up, read [this example](https://cloud.google.com/container-engine/docs/tutorials/guestbook) instead. - -All the files referenced in this example can be downloaded in [current folder](./). - -### Quick Start - -This section shows the simplest way to get the example work. If you want to know the details, you should skip this and read [the rest of the example](#step-one-start-up-the-redis-master). - -Start the guestbook with one command: - -```console -$ kubectl create -f examples/guestbook/all-in-one/guestbook-all-in-one.yaml -service "redis-master" created -deployment "redis-master" created -service "redis-slave" created -deployment "redis-slave" created -service "frontend" created -deployment "frontend" created -``` - -Alternatively, you can start the guestbook by running: - -```console -$ kubectl create -f examples/guestbook/ -``` - -Then, list all your Services: - -```console -$ kubectl get services -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -frontend 10.0.0.117 80/TCP 20s -redis-master 10.0.0.170 6379/TCP 20s -redis-slave 10.0.0.201 6379/TCP 20s -``` - -Now you can access the guestbook on each node with frontend Service's `:`, e.g. `10.0.0.117:80` in this guide. `` is a cluster-internal IP. If you want to access the guestbook from outside of the cluster, add `type: NodePort` to the frontend Service `spec` field. Then you can access the guestbook with `:NodePort` from outside of the cluster. 
On cloud providers which support external load balancers, adding `type: LoadBalancer` to the frontend Service `spec` field will provision a load balancer for your Service. There are several ways for you to access the guestbook. You may learn from [Accessing services running on the cluster](https://kubernetes.io/docs/concepts/cluster-administration/access-cluster/#accessing-services-running-on-the-cluster). - -Clean up the guestbook: - -```console -$ kubectl delete -f examples/guestbook/all-in-one/guestbook-all-in-one.yaml -``` - -or - -```console -$ kubectl delete -f examples/guestbook/ -``` - - -### Step One: Start up the redis master - -Before continuing to the gory details, we also recommend you to read Kubernetes [concepts and user guide](http://kubernetes.io/docs/user-guide/). -**Note**: The redis master in this example is *not* highly available. Making it highly available would be an interesting, but intricate exercise — redis doesn't actually support multi-master Deployments at this point in time, so high availability would be a somewhat tricky thing to implement, and might involve periodic serialization to disk, and so on. - -#### Define a Deployment - -To start the redis master, use the file [redis-master-deployment.yaml](redis-master-deployment.yaml), which describes a single [pod](http://kubernetes.io/docs/user-guide/pods/) running a redis key-value server in a container. - -Although we have a single instance of our redis master, we are using a [Deployment](http://kubernetes.io/docs/user-guide/deployments/) to enforce that exactly one pod keeps running. E.g., if the node were to go down, the Deployment will ensure that the redis master gets restarted on a healthy node. (In our simplified example, this could result in data loss.) 
- -The file [redis-master-deployment.yaml](redis-master-deployment.yaml) defines the redis master Deployment: - - - -```yaml -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: redis-master - # these labels can be applied automatically - # from the labels in the pod template if not set - # labels: - # app: redis - # role: master - # tier: backend -spec: - # this replicas value is default - # modify it according to your case - replicas: 1 - # selector can be applied automatically - # from the labels in the pod template if not set - # selector: - # matchLabels: - # app: guestbook - # role: master - # tier: backend - template: - metadata: - labels: - app: redis - role: master - tier: backend - spec: - containers: - - name: master - image: gcr.io/google_containers/redis:e2e - resources: - requests: - cpu: 100m - memory: 100Mi - ports: - - containerPort: 6379 -``` - -[Download example](redis-master-deployment.yaml?raw=true) - - -#### Define a Service - -A Kubernetes [Service](http://kubernetes.io/docs/user-guide/services/) is a named load balancer that proxies traffic to one or more containers. This is done using the [labels](http://kubernetes.io/docs/user-guide/labels/) metadata that we defined in the `redis-master` pod above. As mentioned, we have only one redis master, but we nevertheless want to create a Service for it. Why? Because it gives us a deterministic way to route to the single master using an elastic IP. - -Services find the pods to load balance based on the pods' labels. -The selector field of the Service description determines which pods will receive the traffic sent to the Service, and the `port` and `targetPort` information defines what port the Service proxy will run at. 
- -The file [redis-master-service.yaml](redis-master-service.yaml) defines the redis master Service: - - - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: redis-master - labels: - app: redis - role: master - tier: backend -spec: - ports: - # the port that this service should serve on - - port: 6379 - targetPort: 6379 - selector: - app: redis - role: master - tier: backend -``` - -[Download example](redis-master-service.yaml?raw=true) - - -#### Create a Service - -According to the [config best practices](http://kubernetes.io/docs/user-guide/config-best-practices/), create a Service before corresponding Deployments so that the scheduler can spread the pods comprising the Service. So we first create the Service by running: - -```console -$ kubectl create -f examples/guestbook/redis-master-service.yaml -service "redis-master" created -``` - -Then check the list of services, which should include the redis-master: - -```console -$ kubectl get services -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -redis-master 10.0.76.248 6379/TCP 1s -``` - -This will cause all pods to see the redis master apparently running on `:`. A Service can map an incoming port to any `targetPort` in the backend pod. Once created, the Service proxy on each node is configured to set up a proxy on the specified port (in this case port `6379`). - -`targetPort` will default to `port` if it is omitted in the configuration. `targetPort` is the port the container accepts traffic on, and `port` is the abstracted Service port, which can be any port other pods use to access the Service. For simplicity's sake, we omit it in the following configurations. - -The traffic flow from slaves to masters can be described in two steps: - - - A *redis slave* will connect to `port` on the *redis master Service* - - Traffic will be forwarded from the Service `port` (on the Service node) to the `targetPort` on the pod that the Service listens to. 
- -For more details, please see [Connecting applications](http://kubernetes.io/docs/user-guide/connecting-applications/). - -#### Finding a Service - -Kubernetes supports two primary modes of finding a Service — environment variables and DNS. - - -##### Environment variables - -The services in a Kubernetes cluster are discoverable inside other containers via [environment variables](https://kubernetes.io/docs/concepts/services-networking/service/#environment-variables). - -##### DNS service - -An alternative is to use the [cluster's DNS service](https://kubernetes.io/docs/concepts/services-networking/service/#dns), if it has been enabled for the cluster. This lets all pods do name resolution of services automatically, based on the Service name. - -This example has been configured to use the DNS service by default. - -If your cluster does not have the DNS service enabled, then you can use environment variables by setting the -`GET_HOSTS_FROM` env value in both -[redis-slave-deployment.yaml](redis-slave-deployment.yaml) and [frontend-deployment.yaml](frontend-deployment.yaml) -from `dns` to `env` before you start up the app. -(However, this is unlikely to be necessary. You can check for the DNS service in the list of the cluster's services by -running `kubectl --namespace=kube-system get rc -l k8s-app=kube-dns`.) -Note that switching to env causes creation-order dependencies, since Services need to be created before their clients that require env vars. 
- -#### Create a Deployment - -Second, create the redis master pod in your Kubernetes cluster by running: - -```console -$ kubectl create -f examples/guestbook/redis-master-deployment.yaml -deployment "redis-master" created -``` - -You can see the Deployment for your cluster by running: - -```console -$ kubectl get deployments -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -redis-master 1 1 1 1 27s -``` - -Then, you can list the pods in the cluster, to verify that the master is running: - -```console -$ kubectl get pods -``` - -You'll see all pods in the cluster, including the redis master pod, and the status of each pod. -The name of the redis master will look similar to that in the following list: - -```console -NAME READY STATUS RESTARTS AGE -redis-master-2353460263-1ecey 1/1 Running 0 1m -... -``` - -(Note that an initial `docker pull` to grab a container image may take a few minutes, depending on network conditions. A pod will be reported as `Pending` while its image is being downloaded.) - -`kubectl get pods` will show only the pods in the default [namespace](http://kubernetes.io/docs/user-guide/namespaces/). To see pods in all namespaces, run: - -``` -kubectl get pods --all-namespaces -``` - -For more details, please see [Configuring containers](http://kubernetes.io/docs/user-guide/configuring-containers/) and [Deploying applications](http://kubernetes.io/docs/user-guide/deploying-applications/). - -#### Optional Interlude - -You can get information about a pod, including the machine that it is running on, via `kubectl describe pods/`. E.g., for the redis master, you should see something like the following (your pod name will be different): - -```console -$ kubectl describe pods redis-master-2353460263-1ecey -Name: redis-master-2353460263-1ecey -Node: kubernetes-node-m0k7/10.240.0.5 -... 
-Labels: app=redis,pod-template-hash=2353460263,role=master,tier=backend -Status: Running -IP: 10.244.2.3 -Controllers: ReplicaSet/redis-master-2353460263 -Containers: - master: - Container ID: docker://76cf8115485966131587958ea3cbe363e2e1dcce129e2e624883f393ce256f6c - Image: gcr.io/google_containers/redis:e2e - Image ID: docker://e5f6c5a2b5646828f51e8e0d30a2987df7e8183ab2c3ed0ca19eaa03cc5db08c - Port: 6379/TCP -... -``` - -The `Node` is the name and IP of the machine, e.g. `kubernetes-node-m0k7` in the example above. You can find more details about this node with `kubectl describe nodes kubernetes-node-m0k7`. - -If you want to view the container logs for a given pod, you can run: - -```console -$ kubectl logs -``` - -These logs will usually give you enough information to troubleshoot. - -However, if you should want to SSH to the listed host machine, you can inspect various logs there directly as well. For example, with Google Compute Engine, using `gcloud`, you can SSH like this: - -```console -me@workstation$ gcloud compute ssh -``` - -Then, you can look at the Docker containers on the remote machine. You should see something like this (the specifics of the IDs will be different): - -```console -me@kubernetes-node-krxw:~$ sudo docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -... -0ffef9649265 redis:latest "/entrypoint.sh redi" About a minute ago Up About a minute k8s_master.869d22f3_redis-master-dz33o_default_1449a58a-5ead-11e5-a104-688f84ef8ef6_d74cb2b5 -``` - -If you want to see the logs for a given container, you can run: - -```console -$ docker logs -``` - -### Step Two: Start up the redis slave - -Now that the redis master is running, we can start up its 'read slaves'. - -We'll define these as replicated pods as well, though this time — unlike for the redis master — we'll define the number of replicas to be 2. -In Kubernetes, a Deployment is responsible for managing multiple instances of a replicated pod. 
The Deployment will automatically launch new pods if the number of replicas falls below the specified number. -(This particular replicated pod is a great one to test this with -- you can try killing the Docker processes for your pods directly, then watch them come back online on a new node shortly thereafter.) - -Just like the master, we want to have a Service to proxy connections to the redis slaves. In this case, in addition to discovery, the slave Service will provide transparent load balancing to web app clients. - -This time we put the Service and Deployment into one [file](http://kubernetes.io/docs/user-guide/managing-deployments/#organizing-resource-configurations). Grouping related objects together in a single file is often better than having separate files. -The specification for the slaves is in [all-in-one/redis-slave.yaml](all-in-one/redis-slave.yaml): - - - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: redis-slave - labels: - app: redis - role: slave - tier: backend -spec: - ports: - # the port that this service should serve on - - port: 6379 - selector: - app: redis - role: slave - tier: backend ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: redis-slave - # these labels can be applied automatically - # from the labels in the pod template if not set - # labels: - # app: redis - # role: slave - # tier: backend -spec: - # this replicas value is default - # modify it according to your case - replicas: 2 - # selector can be applied automatically - # from the labels in the pod template if not set - # selector: - # matchLabels: - # app: guestbook - # role: slave - # tier: backend - template: - metadata: - labels: - app: redis - role: slave - tier: backend - spec: - containers: - - name: slave - image: gcr.io/google_samples/gb-redisslave:v1 - resources: - requests: - cpu: 100m - memory: 100Mi - env: - - name: GET_HOSTS_FROM - value: dns - # If your cluster config does not include a dns service, then to - # instead 
access an environment variable to find the master - # service's host, comment out the 'value: dns' line above, and - # uncomment the line below. - # value: env - ports: - - containerPort: 6379 -``` - -[Download example](all-in-one/redis-slave.yaml?raw=true) - - -This time the selector for the Service is `app=redis,role=slave,tier=backend`, because that identifies the pods running redis slaves. It is generally helpful to set labels on your Service itself as we've done here to make it easy to locate them with the `kubectl get services -l "app=redis,role=slave,tier=backend"` command. For more information on the usage of labels, see [using-labels-effectively](http://kubernetes.io/docs/user-guide/managing-deployments/#using-labels-effectively). - -Now that you have created the specification, create the Service in your cluster by running: - -```console -$ kubectl create -f examples/guestbook/all-in-one/redis-slave.yaml -service "redis-slave" created -deployment "redis-slave" created - -$ kubectl get services -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -redis-master 10.0.76.248 6379/TCP 20m -redis-slave 10.0.112.188 6379/TCP 16s - -$ kubectl get deployments -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -redis-master 1 1 1 1 22m -redis-slave 2 2 2 2 2m -``` - -Once the Deployment is up, you can list the pods in the cluster, to verify that the master and slaves are running. You should see a list that includes something like the following: - -```console -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -redis-master-2353460263-1ecey 1/1 Running 0 35m -redis-slave-1691881626-dlf5f 1/1 Running 0 15m -redis-slave-1691881626-sfn8t 1/1 Running 0 15m -``` - -You should see a single redis master pod and two redis slave pods. As mentioned above, you can get more information about any pod with: `kubectl describe pods/`. And also can view the resources on [kube-ui](http://kubernetes.io/docs/user-guide/ui/). 
- -### Step Three: Start up the guestbook frontend - -A frontend pod is a simple PHP server that is configured to talk to either the slave or master services, depending on whether the client request is a read or a write. It exposes a simple AJAX interface, and serves an Angular-based UX. -Again we'll create a set of replicated frontend pods instantiated by a Deployment — this time, with three replicas. - -As with the other pods, we now want to create a Service to group the frontend pods. -The Deployment and Service are described in the file [all-in-one/frontend.yaml](all-in-one/frontend.yaml): - - - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: frontend - labels: - app: guestbook - tier: frontend -spec: - # if your cluster supports it, uncomment the following to automatically create - # an external load-balanced IP for the frontend service. - # type: LoadBalancer - ports: - # the port that this service should serve on - - port: 80 - selector: - app: guestbook - tier: frontend ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: frontend - # these labels can be applied automatically - # from the labels in the pod template if not set - # labels: - # app: guestbook - # tier: frontend -spec: - # this replicas value is default - # modify it according to your case - replicas: 3 - # selector can be applied automatically - # from the labels in the pod template if not set - # selector: - # matchLabels: - # app: guestbook - # tier: frontend - template: - metadata: - labels: - app: guestbook - tier: frontend - spec: - containers: - - name: php-redis - image: gcr.io/google-samples/gb-frontend:v4 - resources: - requests: - cpu: 100m - memory: 100Mi - env: - - name: GET_HOSTS_FROM - value: dns - # If your cluster config does not include a dns service, then to - # instead access environment variables to find service host - # info, comment out the 'value: dns' line above, and uncomment the - # line below. 
- # value: env - ports: - - containerPort: 80 -``` - -[Download example](all-in-one/frontend.yaml?raw=true) - - -#### Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific) - -For supported cloud providers, such as Google Compute Engine or Google Container Engine, you can specify to use an external load balancer -in the service `spec`, to expose the service onto an external load balancer IP. -To do this, uncomment the `type: LoadBalancer` line in the [all-in-one/frontend.yaml](all-in-one/frontend.yaml) file before you start the service. - -[See the appendix below](#appendix-accessing-the-guestbook-site-externally) on accessing the guestbook site externally for more details. - -Create the service and Deployment like this: - -```console -$ kubectl create -f examples/guestbook/all-in-one/frontend.yaml -service "frontend" created -deployment "frontend" created -``` - -Then, list all your services again: - -```console -$ kubectl get services -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -frontend 10.0.63.63 80/TCP 1m -redis-master 10.0.76.248 6379/TCP 39m -redis-slave 10.0.112.188 6379/TCP 19m -``` - -Also list all your Deployments: - -```console -$ kubectl get deployments -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -frontend 3 3 3 3 2m -redis-master 1 1 1 1 39m -redis-slave 2 2 2 2 20m -``` - -Once it's up, i.e. when desired replicas match current replicas (again, it may take up to thirty seconds to create the pods), you can list the pods with specified labels in the cluster, to verify that the master, slaves and frontends are all running. 
You should see a list containing pods with label 'tier' like the following: - -```console -$ kubectl get pods -L tier -NAME READY STATUS RESTARTS AGE TIER -frontend-1211764471-4e1j2 1/1 Running 0 4m frontend -frontend-1211764471-gkbkv 1/1 Running 0 4m frontend -frontend-1211764471-rk1cf 1/1 Running 0 4m frontend -redis-master-2353460263-1ecey 1/1 Running 0 42m backend -redis-slave-1691881626-dlf5f 1/1 Running 0 22m backend -redis-slave-1691881626-sfn8t 1/1 Running 0 22m backend -``` - -You should see a single redis master pod, two redis slaves, and three frontend pods. - -The code for the PHP server that the frontends are running is in `examples/guestbook/php-redis/guestbook.php`. It looks like this: - -```php - 'tcp', - 'host' => $host, - 'port' => 6379, - ]); - - $client->set($_GET['key'], $_GET['value']); - print('{"message": "Updated"}'); - } else { - $host = 'redis-slave'; - if (getenv('GET_HOSTS_FROM') == 'env') { - $host = getenv('REDIS_SLAVE_SERVICE_HOST'); - } - $client = new Predis\Client([ - 'scheme' => 'tcp', - 'host' => $host, - 'port' => 6379, - ]); - - $value = $client->get($_GET['key']); - print('{"data": "' . $value . '"}'); - } -} else { - phpinfo(); -} ?> -``` - -Note the use of the `redis-master` and `redis-slave` host names -- we're finding those Services via the Kubernetes cluster's DNS service, as discussed above. All the frontend replicas will write to the load-balancing redis-slaves service, which can be highly replicated as well. - -### Step Four: Cleanup - -If you are in a live Kubernetes cluster, you can just kill the pods by deleting the Deployments and Services. Using labels to select the resources to delete is an easy way to do this in one command. 
- -```console -$ kubectl delete deployments,services -l "app in (redis, guestbook)" -``` - -To completely tear down a Kubernetes cluster, if you ran this from source, you can use: - -```console -$ /cluster/kube-down.sh -``` - -### Troubleshooting - -If you are having trouble bringing up your guestbook app, double check that your external IP is properly defined for your frontend Service, and that the firewall for your cluster nodes is open to port 80. - -Then, see the [troubleshooting documentation](http://kubernetes.io/docs/troubleshooting/) for a further list of common issues and how you can diagnose them. - - - -### Appendix: Accessing the guestbook site externally - -You'll want to set up your guestbook Service so that it can be accessed from outside of the internal Kubernetes network. Above, we introduced one way to do that, by setting `type: LoadBalancer` to Service `spec`. - -More generally, Kubernetes supports two ways of exposing a Service onto an external IP address: `NodePort`s and `LoadBalancer`s , as described [here](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types). - -If the `LoadBalancer` specification is used, it can take a short period for an external IP to show up in `kubectl get services` output, but you should then see it listed as well, e.g. like this: - -```console -$ kubectl get services -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -frontend 10.0.63.63 23.236.59.54 80/TCP 1m -redis-master 10.0.76.248 6379/TCP 39m -redis-slave 10.0.112.188 6379/TCP 19m -``` - -Once you've exposed the service to an external IP, visit the IP to see your guestbook in action, i.e. `http://:`. - -You should see a web page that looks something like this (without the messages). Try adding some entries to it! 
- - - -If you are more advanced in the ops arena, you can also manually get the service IP from looking at the output of `kubectl get pods,services`, and modify your firewall using standard tools and services (firewalld, iptables, selinux) which you are already familiar with. - -#### Google Compute Engine External Load Balancer Specifics - -In Google Compute Engine, Kubernetes automatically creates forwarding rules for services with `LoadBalancer`. - -You can list the forwarding rules like this (the forwarding rule also indicates the external IP): - -```console -$ gcloud compute forwarding-rules list -NAME REGION IP_ADDRESS IP_PROTOCOL TARGET -frontend us-central1 130.211.188.51 TCP us-central1/targetPools/frontend -``` - -In Google Compute Engine, you also may need to open the firewall for port 80 using the [console][cloud-console] or the `gcloud` tool. The following command will allow traffic from any source to instances tagged `kubernetes-node` (replace with your tags as appropriate): - -```console -$ gcloud compute firewall-rules create --allow=tcp:80 --target-tags=kubernetes-node kubernetes-node-80 -``` - -For GCE Kubernetes startup details, see the [Getting started on Google Compute Engine](http://kubernetes.io/docs/getting-started-guides/gce/) - -For Google Compute Engine details about limiting traffic to specific sources, see the [Google Compute Engine firewall documentation][gce-firewall-docs]. 
- -[cloud-console]: https://console.developer.google.com -[gce-firewall-docs]: https://cloud.google.com/compute/docs/networking#firewalls - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/guestbook/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/guestbook/README.md](https://github.com/kubernetes/examples/blob/master/guestbook/README.md) diff --git a/examples/https-nginx/README.md b/examples/https-nginx/README.md index b0e147e4f47..6ce72378cc5 100644 --- a/examples/https-nginx/README.md +++ b/examples/https-nginx/README.md @@ -1,129 +1 @@ - -# Nginx https service - -This example creates a basic nginx https service useful in verifying proof of concept, keys, secrets, configmap, and end-to-end https service creation in kubernetes. -It uses an [nginx server block](http://wiki.nginx.org/ServerBlockExample) to serve the index page over both http and https. It will detect changes to nginx's configuration file, default.conf, mounted as a configmap volume and reload nginx automatically. - -### Generate certificates - -First generate a self signed rsa key and certificate that the server can use for TLS. This step invokes the make_secret.go script in the same directory, which uses the kubernetes api to generate a secret json config in /tmp/secret.json. - -```sh -$ make keys secret KEY=/tmp/nginx.key CERT=/tmp/nginx.crt SECRET=/tmp/secret.json -``` - -### Create a https nginx application running in a kubernetes cluster - -You need a [running kubernetes cluster](https://kubernetes.io/docs/setup/pick-right-solution/) for this to work. - -Create a secret and a configmap. - -```sh -$ kubectl create -f /tmp/secret.json -secret "nginxsecret" created - -$ kubectl create configmap nginxconfigmap --from-file=examples/https-nginx/default.conf -configmap "nginxconfigmap" created -``` - -Create a service and a replication controller using the configuration in nginx-app.yaml. 
- -```sh -$ kubectl create -f examples/https-nginx/nginx-app.yaml -You have exposed your service on an external port on all nodes in your -cluster. If you want to expose this service to the external internet, you may -need to set up firewall rules for the service port(s) (tcp:32211,tcp:30028) to serve traffic. -... -service "nginxsvc" created -replicationcontroller "my-nginx" created -``` - -Then, find the node port that Kubernetes is using for http and https traffic. - -```sh -$ kubectl get service nginxsvc -o json -... - { - "name": "http", - "protocol": "TCP", - "port": 80, - "targetPort": 80, - "nodePort": 32211 - }, - { - "name": "https", - "protocol": "TCP", - "port": 443, - "targetPort": 443, - "nodePort": 30028 - } -... -``` - -If you are using Kubernetes on a cloud provider, you may need to create cloud firewall rules to serve traffic. -If you are using GCE or GKE, you can use the following commands to add firewall rules. - -```sh -$ gcloud compute firewall-rules create allow-nginx-http --allow tcp:32211 --description "Incoming http allowed." -Created [https://www.googleapis.com/compute/v1/projects/hello-world-job/global/firewalls/allow-nginx-http]. -NAME NETWORK SRC_RANGES RULES SRC_TAGS TARGET_TAGS -allow-nginx-http default 0.0.0.0/0 tcp:32211 - -$ gcloud compute firewall-rules create allow-nginx-https --allow tcp:30028 --description "Incoming https allowed." -Created [https://www.googleapis.com/compute/v1/projects/hello-world-job/global/firewalls/allow-nginx-https]. -NAME NETWORK SRC_RANGES RULES SRC_TAGS TARGET_TAGS -allow-nginx-https default 0.0.0.0/0 tcp:30028 -``` - -Find your nodes' IPs. - -```sh -$ kubectl get nodes -o json | grep ExternalIP -A 2 - "type": "ExternalIP", - "address": "104.198.1.26" - } --- - "type": "ExternalIP", - "address": "104.198.12.158" - } --- - "type": "ExternalIP", - "address": "104.198.11.137" - } -``` - -Now your service is up. You can either use your browser or type the following commands. 
- -```sh -$ curl https://: -k - -$ curl https://104.198.1.26:30028 -k -... -Welcome to nginx! -... -``` - -Then we will update the configmap by changing `index.html` to `index2.html`. - -```sh -kubectl create configmap nginxconfigmap --from-file=examples/https-nginx/default.conf -o yaml --dry-run\ -| sed 's/index.html/index2.html/g' | kubectl apply -f - -configmap "nginxconfigmap" configured -``` - -Wait a few seconds to let the change propagate. Now you should be able to either use your browser or type the following commands to verify Nginx has been reloaded with new configuration. - -```sh -$ curl https://: -k - -$ curl https://104.198.1.26:30028 -k -... -Nginx reloaded! -... -``` - -For more information on how to run this in a kubernetes cluster, please see the [user-guide](https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/). - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/https-nginx/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/https-nginx/README.md](https://github.com/kubernetes/examples/blob/master/staging/https-nginx/README.md) diff --git a/examples/javaee/README.md b/examples/javaee/README.md index 9410f84b925..1fcd411acc6 100644 --- a/examples/javaee/README.md +++ b/examples/javaee/README.md @@ -1,134 +1 @@ -## Java EE Application using WildFly and MySQL - -The following document describes the deployment of a Java EE application using [WildFly](http://wildfly.org) application server and MySQL database server on Kubernetes. The sample application source code is at: https://github.com/javaee-samples/javaee7-simple-sample. - -### Prerequisites - -https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/prereqs.md - -### Start MySQL Pod - -In Kubernetes a [_Pod_](https://kubernetes.io/docs/user-guide/pods.md) is the smallest deployable unit that can be created, scheduled, and managed. 
It's a collocated group of containers that share an IP and storage volume. - -Here is the config for MySQL pod: [mysql-pod.yaml](mysql-pod.yaml) - - - - -Create the MySQL pod: - -```sh -kubectl create -f examples/javaee/mysql-pod.yaml -``` - -Check status of the pod: - -```sh -kubectl get -w po -NAME READY STATUS RESTARTS AGE -mysql-pod 0/1 Pending 0 4s -NAME READY STATUS RESTARTS AGE -mysql-pod 0/1 Running 0 44s -mysql-pod 1/1 Running 0 44s -``` - -Wait for the status to `1/1` and `Running`. - -### Start MySQL Service - -We are creating a [_Service_](https://kubernetes.io/docs/user-guide/services.md) to expose the TCP port of the MySQL server. A Service distributes traffic across a set of Pods. The order of Service and the targeted Pods does not matter. However, the Service needs to be started before any other Pods consuming the Service are started. - -In this application, we will use a Kubernetes Service to provide a discoverable endpoint for the MySQL server in the cluster. The MySQL service targets pods with the labels `name: mysql-pod` and `context: docker-k8s-lab`. - -Here is the definition of the MySQL service: [mysql-service.yaml](mysql-service.yaml) - - - - -Create this service: - -```sh -kubectl create -f examples/javaee/mysql-service.yaml -``` - -Get status of the service: - -```sh -kubectl get -w svc -NAME LABELS SELECTOR IP(S) PORT(S) -kubernetes component=apiserver,provider=kubernetes 10.247.0.1 443/TCP -mysql-service context=docker-k8s-lab,name=mysql-pod context=docker-k8s-lab,name=mysql-pod 10.247.63.43 3306/TCP -``` - -If multiple services are running, then it can be narrowed by specifying labels: - -```sh -kubectl get -w po -l context=docker-k8s-lab,name=mysql-pod -NAME READY STATUS RESTARTS AGE -mysql-pod 1/1 Running 0 4m -``` - -This is also the selector label used by service to target pods. - -When a Service is run on a node, the kubelet adds a set of environment variables for each active Service. 
It supports both Docker links compatible variables and simpler `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` variables, where the Service name is upper-cased and dashes are converted to underscores. - -Our service name is `mysql-service` and so the `MYSQL_SERVICE_SERVICE_HOST` and `MYSQL_SERVICE_SERVICE_PORT` variables are available to other pods. These host and port variables are then used to create the JDBC resource in WildFly. - -### Start WildFly Replication Controller - -WildFly is a lightweight Java EE 7 compliant application server. It is wrapped in a Replication Controller and used as the Java EE runtime. - -In Kubernetes a [_Replication Controller_](https://kubernetes.io/docs/user-guide/replication-controller.md) is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of its set. Unlike a service it also has a desired number of replicas, and it will create or delete pods to ensure that the number of pods matches up with its desired state. - -Here is the definition of the WildFly replication controller: [wildfly-rc.yaml](wildfly-rc.yaml). 
- - - - -Create this controller: - -```sh -kubectl create -f examples/javaee/wildfly-rc.yaml -``` - -Check status of the pod inside replication controller: - -```sh -kubectl get po -NAME READY STATUS RESTARTS AGE -mysql-pod 1/1 Running 0 1h -wildfly-rc-w2kk5 1/1 Running 0 6m -``` - -### Access the application - -Get IP address of the pod: - -```sh -kubectl get -o template po wildfly-rc-w2kk5 --template={{.status.podIP}} -10.246.1.23 -``` - -Log in to node and access the application: - -```sh -vagrant ssh node-1 -Last login: Thu Jul 16 00:24:36 2015 from 10.0.2.2 -[vagrant@kubernetes-node-1 ~]$ curl http://10.246.1.23:8080/employees/resources/employees/ -1Penny2Sheldon3Amy4Leonard5Bernadette6Raj7Howard8Priya -``` - -### Delete resources - -All resources created in this application can be deleted: - -```sh -kubectl delete -f examples/javaee/mysql-pod.yaml -kubectl delete -f examples/javaee/mysql-service.yaml -kubectl delete -f examples/javaee/wildfly-rc.yaml -``` - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/javaee/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/javaee/README.md](https://github.com/kubernetes/examples/blob/master/staging/javaee/README.md) diff --git a/examples/javaweb-tomcat-sidecar/README.md b/examples/javaweb-tomcat-sidecar/README.md index 8e50d66d2fb..2537a86fde3 100644 --- a/examples/javaweb-tomcat-sidecar/README.md +++ b/examples/javaweb-tomcat-sidecar/README.md @@ -1,185 +1 @@ -## Java Web Application with Tomcat and Sidecar Container - -The following document describes the deployment of a Java Web application using Tomcat. Instead of packaging `war` file inside the Tomcat image or mount the `war` as a volume, we use a sidecar container as `war` file provider. 
- -### Prerequisites - -https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/prereqs.md - -### Overview - -This sidecar mode brings a new workflow for Java users: - -![](workflow.png?raw=true "Workflow") - -As you can see, user can create a `sample:v2` container as sidecar to "provide" war file to Tomcat by copying it to the shared `emptyDir` volume. And Pod will make sure the two containers compose an "atomic" scheduling unit, which is perfect for this case. Thus, your application version management will be totally separated from web server management. - -For example, if you are going to change the configurations of your Tomcat: - -```console -$ docker exec -it /bin/bash -# make some change, and then commit it to a new image -$ docker commit mytomcat:7.0-dev -``` - -Done! The new Tomcat image **will not** mess up with your `sample.war` file. You can re-use your tomcat image with lots of different war container images for lots of different apps without having to build lots of different images. - -Also this means that rolling out a new Tomcat to patch security or whatever else, doesn't require rebuilding N different images. - -**Why not put my `sample.war` in a host dir and mount it to tomcat container?** - -You have to **manage the volumes** in this case, for example, when you restart or scale the pod on another node, your contents is not ready on that host. - -Generally, we have to set up a distributed file system (NFS at least) volume to solve this (if we do not have GCE PD volume). But this is generally unnecessary. - -### How To Set this Up - -In Kubernetes a [_Pod_](https://kubernetes.io/docs/user-guide/pods.md) is the smallest deployable unit that can be created, scheduled, and managed. It's a collocated group of containers that share an IP and storage volume. - -Here is the config [javaweb.yaml](javaweb.yaml) for Java Web pod: - -NOTE: you should define `war` container **first** as it is the "provider". 
- - - -``` -apiVersion: v1 -kind: Pod -metadata: - name: javaweb -spec: - containers: - - image: resouer/sample:v1 - name: war - volumeMounts: - - mountPath: /app - name: app-volume - - image: resouer/mytomcat:7.0 - name: tomcat - command: ["sh","-c","/root/apache-tomcat-7.0.42-v2/bin/start.sh"] - volumeMounts: - - mountPath: /root/apache-tomcat-7.0.42-v2/webapps - name: app-volume - ports: - - containerPort: 8080 - hostPort: 8001 - volumes: - - name: app-volume - emptyDir: {} -``` - - - -The only magic here is the `resouer/sample:v1` image: - -``` -FROM busybox:latest -ADD sample.war sample.war -CMD "sh" "mv.sh" -``` - -And the contents of `mv.sh` is: - -```sh -cp /sample.war /app -tail -f /dev/null -``` - -#### Explanation - -1. 'war' container only contains the `war` file of your app -2. 'war' container's CMD tries to copy `sample.war` to the `emptyDir` volume path -3. The last line of `tail -f` is just used to hold the container, as Replication Controller does not support one-off task -4. 
'tomcat' container will load the `sample.war` from volume path - -What's more, if you don't want to enclose a build-in `mv.sh` script in the `war` container, you can use Pod lifecycle handler to do the copy work, here's a example [javaweb-2.yaml](javaweb-2.yaml): - - - - -``` -apiVersion: v1 -kind: Pod -metadata: - name: javaweb-2 -spec: - containers: - - image: resouer/sample:v2 - name: war - lifecycle: - postStart: - exec: - command: - - "cp" - - "/sample.war" - - "/app" - volumeMounts: - - mountPath: /app - name: app-volume - - image: resouer/mytomcat:7.0 - name: tomcat - command: ["sh","-c","/root/apache-tomcat-7.0.42-v2/bin/start.sh"] - volumeMounts: - - mountPath: /root/apache-tomcat-7.0.42-v2/webapps - name: app-volume - ports: - - containerPort: 8080 - hostPort: 8001 - volumes: - - name: app-volume - emptyDir: {} -``` - - - -And the `resouer/sample:v2` Dockerfile is quite simple: - -``` -FROM busybox:latest -ADD sample.war sample.war -CMD "tail" "-f" "/dev/null" -``` - -#### Explanation - -1. 'war' container only contains the `war` file of your app -2. 'war' container's CMD uses `tail -f` to hold the container, nothing more -3. The `postStart` lifecycle handler will do `cp` after the `war` container is started -4. Again 'tomcat' container will load the `sample.war` from volume path - -Done! Now your `war` container contains nothing except `sample.war`, clean enough. - -### Test It Out - -Create the Java web pod: - -```console -$ kubectl create -f examples/javaweb-tomcat-sidecar/javaweb-2.yaml -``` - -Check status of the pod: - -```console -$ kubectl get -w po -NAME READY STATUS RESTARTS AGE -javaweb-2 2/2 Running 0 7s -``` - -Wait for the status to `2/2` and `Running`. Then you can visit "Hello, World" page on `http://localhost:8001/sample/index.html` - -You can also test `javaweb.yaml` in the same way. 
- -### Delete Resources - -All resources created in this application can be deleted: - -```console -$ kubectl delete -f examples/javaweb-tomcat-sidecar/javaweb-2.yaml -``` - - - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/javaweb-tomcat-sidecar/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/javaweb-tomcat-sidecar/README.md](https://github.com/kubernetes/examples/blob/master/staging/javaweb-tomcat-sidecar/README.md) diff --git a/examples/job/expansions/README.md b/examples/job/expansions/README.md index ea72d5663ed..4346ab25313 100644 --- a/examples/job/expansions/README.md +++ b/examples/job/expansions/README.md @@ -1,7 +1 @@ - -This file has moved to: http://kubernetes.io/docs/user-guide/jobs/ - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/job/expansions/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/job/expansions/README.md](https://github.com/kubernetes/examples/blob/master/staging/job/expansions/README.md) diff --git a/examples/job/work-queue-1/README.md b/examples/job/work-queue-1/README.md index d32d130f667..8ac3051cfe5 100644 --- a/examples/job/work-queue-1/README.md +++ b/examples/job/work-queue-1/README.md @@ -1,7 +1 @@ - -This file has moved to: http://kubernetes.io/docs/user-guide/jobs/ - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/job/work-queue-1/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/job/work-queue-1/README.md](https://github.com/kubernetes/examples/blob/master/staging/job/work-queue-1/README.md) diff --git a/examples/job/work-queue-2/README.md b/examples/job/work-queue-2/README.md index 94355c0f6d4..f0e175d83fb 100644 --- a/examples/job/work-queue-2/README.md +++ b/examples/job/work-queue-2/README.md @@ -1,7 +1 @@ - -This file has moved to: 
http://kubernetes.io/docs/user-guide/jobs/ - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/job/work-queue-2/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/job/work-queue-2/README.md](https://github.com/kubernetes/examples/blob/master/staging/job/work-queue-2/README.md) diff --git a/examples/kubectl-container/README.md b/examples/kubectl-container/README.md index 031a3eaf85b..bb2c3d2eb86 100644 --- a/examples/kubectl-container/README.md +++ b/examples/kubectl-container/README.md @@ -1,31 +1 @@ -To access the Kubernetes API [from a Pod](https://kubernetes.io/docs/user-guide/accessing-the-cluster.md#accessing-the-api-from-a-pod) one of the solution is to run `kubectl proxy` in a so-called sidecar container within the Pod. To do this, you need to package `kubectl` in a container. It is useful when service accounts are being used for accessing the API and the old no-auth KUBERNETES_RO service is not available. Since all containers in a Pod share the same network namespace, containers will be able to reach the API on localhost. - -This example contains a [Dockerfile](Dockerfile) and [Makefile](Makefile) for packaging up `kubectl` into -a container and pushing the resulting container image on the Google Container Registry. You can modify the Makefile to push to a different registry if needed. - -Assuming that you have checked out the Kubernetes source code and setup your environment to be able to build it. The typical build step of this kubectl container will be: - - $ cd examples/kubectl-container - $ make kubectl - $ make tag - $ make container - $ make push - -It is not currently automated as part of a release process, so for the moment -this is an example of what to do if you want to package `kubectl` into a -container and use it within a pod. 
- -In the future, we may release consistently versioned groups of containers when -we cut a release, in which case the source of gcr.io/google_containers/kubectl -would become that automated process. - -[```pod.json```](pod.json) is provided as an example of running `kubectl` as a sidecar -container in a Pod, and to help you verify that `kubectl` works correctly in -this configuration. To launch this Pod, you will need a configured Kubernetes endpoint and `kubectl` installed locally, then simply create the Pod: - - $ kubectl create -f pod.json - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/kubectl-container/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/kubectl-container/README.md](https://github.com/kubernetes/examples/blob/master/staging/kubectl-container/README.md) diff --git a/examples/meteor/README.md b/examples/meteor/README.md index 781b8e99cc7..81f4d9fd4db 100644 --- a/examples/meteor/README.md +++ b/examples/meteor/README.md @@ -1,215 +1 @@ -Meteor on Kubernetes -==================== - -This example shows you how to package and run a -[Meteor](https://www.meteor.com/) app on Kubernetes. - -Get started on Google Compute Engine ------------------------------------- - -Meteor uses MongoDB, and we will use the `GCEPersistentDisk` type of -volume for persistent storage. Therefore, this example is only -applicable to [Google Compute -Engine](https://cloud.google.com/compute/). Take a look at the -[volumes documentation](https://kubernetes.io/docs/user-guide/volumes.md) for other options. - -First, if you have not already done so: - -1. [Create](https://cloud.google.com/compute/docs/quickstart) a -[Google Cloud Platform](https://cloud.google.com/) project. -2. [Enable -billing](https://developers.google.com/console/help/new/#billing). -3. Install the [gcloud SDK](https://cloud.google.com/sdk/). 
- -Authenticate with gcloud and set the gcloud default project name to -point to the project you want to use for your Kubernetes cluster: - -```sh -gcloud auth login -gcloud config set project -``` - -Next, start up a Kubernetes cluster: - -```sh -wget -q -O - https://get.k8s.io | bash -``` - -Please see the [Google Compute Engine getting started -guide](https://kubernetes.io/docs/getting-started-guides/gce.md) for full -details and other options for starting a cluster. - -Build a container for your Meteor app -------------------------------------- - -To be able to run your Meteor app on Kubernetes you need to build a -Docker container for it first. To do that you need to install -[Docker](https://www.docker.com) Once you have that you need to add 2 -files to your existing Meteor project `Dockerfile` and -`.dockerignore`. - -`Dockerfile` should contain the below lines. You should replace the -`ROOT_URL` with the actual hostname of your app. - -``` -FROM chees/meteor-kubernetes -ENV ROOT_URL http://myawesomeapp.com -``` - -The `.dockerignore` file should contain the below lines. This tells -Docker to ignore the files on those directories when it's building -your container. - -``` -.meteor/local -packages/*/.build* -``` - -You can see an example meteor project already set up at: -[meteor-gke-example](https://github.com/Q42/meteor-gke-example). Feel -free to use this app for this example. - -> Note: The next step will not work if you have added mobile platforms -> to your meteor project. Check with `meteor list-platforms` - -Now you can build your container by running this in -your Meteor project directory: - -``` -docker build -t my-meteor . -``` - -Pushing to a registry ---------------------- - -For the [Docker Hub](https://hub.docker.com/), tag your app image with -your username and push to the Hub with the below commands. Replace -`` with your Hub username. 
- -``` -docker tag my-meteor /my-meteor -docker push /my-meteor -``` - -For [Google Container -Registry](https://cloud.google.com/tools/container-registry/), tag -your app image with your project ID, and push to GCR. Replace -`` with your project ID. - -``` -docker tag my-meteor gcr.io//my-meteor -gcloud docker -- push gcr.io//my-meteor -``` - -Running -------- - -Now that you have containerized your Meteor app it's time to set up -your cluster. Edit [`meteor-controller.json`](meteor-controller.json) -and make sure the `image:` points to the container you just pushed to -the Docker Hub or GCR. - -We will need to provide MongoDB a persistent Kubernetes volume to -store its data. See the [volumes documentation](https://kubernetes.io/docs/user-guide/volumes.md) for -options. We're going to use Google Compute Engine persistent -disks. Create the MongoDB disk by running: - -``` -gcloud compute disks create --size=200GB mongo-disk -``` - -Now you can start Mongo using that disk: - -``` -kubectl create -f examples/meteor/mongo-pod.json -kubectl create -f examples/meteor/mongo-service.json -``` - -Wait until Mongo is started completely and then start up your Meteor app: - -``` -kubectl create -f examples/meteor/meteor-service.json -kubectl create -f examples/meteor/meteor-controller.json -``` - -Note that [`meteor-service.json`](meteor-service.json) creates a load balancer, so -your app should be available through the IP of that load balancer once -the Meteor pods are started. We also created the service before creating the rc to -aid the scheduler in placing pods, as the scheduler ranks pod placement according to -service anti-affinity (among other things). You can find the IP of your load balancer -by running: - -``` -kubectl get service meteor --template="{{range .status.loadBalancer.ingress}} {{.ip}} {{end}}" -``` - -You will have to open up port 80 if it's not open yet in your -environment. On Google Compute Engine, you may run the below command. 
- -``` -gcloud compute firewall-rules create meteor-80 --allow=tcp:80 --target-tags kubernetes-node -``` - -What is going on? ------------------ - -Firstly, the `FROM chees/meteor-kubernetes` line in your `Dockerfile` -specifies the base image for your Meteor app. The code for that image -is located in the `dockerbase/` subdirectory. Open up the `Dockerfile` -to get an insight of what happens during the `docker build` step. The -image is based on the Node.js official image. It then installs Meteor -and copies in your apps' code. The last line specifies what happens -when your app container is run. - -```sh -ENTRYPOINT MONGO_URL=mongodb://$MONGO_SERVICE_HOST:$MONGO_SERVICE_PORT /usr/local/bin/node main.js -``` - -Here we can see the MongoDB host and port information being passed -into the Meteor app. The `MONGO_SERVICE...` environment variables are -set by Kubernetes, and point to the service named `mongo` specified in -[`mongo-service.json`](mongo-service.json). See the [environment -documentation](https://kubernetes.io/docs/user-guide/container-environment.md) for more details. - -As you may know, Meteor uses long lasting connections, and requires -_sticky sessions_. With Kubernetes you can scale out your app easily -with session affinity. The -[`meteor-service.json`](meteor-service.json) file contains -`"sessionAffinity": "ClientIP"`, which provides this for us. See the -[service -documentation](https://kubernetes.io/docs/user-guide/services.md#virtual-ips-and-service-proxies) for -more information. - -As mentioned above, the mongo container uses a volume which is mapped -to a persistent disk by Kubernetes. 
In [`mongo-pod.json`](mongo-pod.json) the container -section specifies the volume: - -```json -{ - "volumeMounts": [ - { - "name": "mongo-disk", - "mountPath": "/data/db" - } -``` - -The name `mongo-disk` refers to the volume specified outside the -container section: - -```json -{ - "volumes": [ - { - "name": "mongo-disk", - "gcePersistentDisk": { - "pdName": "mongo-disk", - "fsType": "ext4" - } - } - ], -``` - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/meteor/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/meteor/README.md](https://github.com/kubernetes/examples/blob/master/staging/meteor/README.md) diff --git a/examples/meteor/dockerbase/README.md b/examples/meteor/dockerbase/README.md index 2c04dff7806..2a90269486a 100644 --- a/examples/meteor/dockerbase/README.md +++ b/examples/meteor/dockerbase/README.md @@ -1,14 +1 @@ -Building the meteor-kubernetes base image ------------------------------------------ - -As a normal user you don't need to do this since the image is already built and pushed to Docker Hub. You can just use it as a base image. See [this example](https://github.com/Q42/meteor-gke-example/blob/master/Dockerfile). - -To build and push the base meteor-kubernetes image: - - docker build -t chees/meteor-kubernetes . 
- docker push chees/meteor-kubernetes - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/meteor/dockerbase/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/meteor/dockerbase/README.md](https://github.com/kubernetes/examples/blob/master/staging/meteor/dockerbase/README.md) diff --git a/examples/mysql-cinder-pd/README.md b/examples/mysql-cinder-pd/README.md index fb089a01cf8..ac84d00fe9c 100644 --- a/examples/mysql-cinder-pd/README.md +++ b/examples/mysql-cinder-pd/README.md @@ -1,51 +1 @@ -# MySQL installation with cinder volume plugin - -Cinder is a Block Storage service for OpenStack. This example shows how it can be used as an attachment mounted to a pod in Kubernets. - -### Prerequisites - -Start kubelet with cloud provider as openstack with a valid cloud config -Sample cloud_config: - -``` -[Global] -auth-url=https://os-identity.vip.foo.bar.com:5443/v2.0 -username=user -password=pass -region=region1 -tenant-id=0c331a1df18571594d49fe68asa4e -``` - -Currently the cinder volume plugin is designed to work only on linux hosts and offers ext4 and ext3 as supported fs types -Make sure that kubelet host machine has the following executables - -``` -/bin/lsblk -- To Find out the fstype of the volume -/sbin/mkfs.ext3 and /sbin/mkfs.ext4 -- To format the volume if required -/usr/bin/udevadm -- To probe the volume attached so that a symlink is created under /dev/disk/by-id/ with a virtio- prefix -``` - -Ensure cinder is installed and configured properly in the region in which kubelet is spun up - -### Example - -Create a cinder volume Ex: - -`cinder create --display-name=test-repo 2` - -Use the id of the cinder volume created to create a pod [definition](mysql.yaml) -Create a new pod with the definition - -`cluster/kubectl.sh create -f examples/mysql-cinder-pd/mysql.yaml` - -This should now - -1. Attach the specified volume to the kubelet's host machine -2. 
Format the volume if required (only if the volume specified is not already formatted to the fstype specified) -3. Mount it on the kubelet's host machine -4. Spin up a container with this volume mounted to the path specified in the pod definition - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/mysql-cinder-pd/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/mysql-cinder-pd/README.md](https://github.com/kubernetes/examples/blob/master/staging/mysql-cinder-pd/README.md) diff --git a/examples/mysql-wordpress-pd/README.md b/examples/mysql-wordpress-pd/README.md index a4a8a235360..48601ec8133 100644 --- a/examples/mysql-wordpress-pd/README.md +++ b/examples/mysql-wordpress-pd/README.md @@ -1,364 +1 @@ -# Persistent Installation of MySQL and WordPress on Kubernetes - -This example describes how to run a persistent installation of -[WordPress](https://wordpress.org/) and -[MySQL](https://www.mysql.com/) on Kubernetes. We'll use the -[mysql](https://registry.hub.docker.com/_/mysql/) and -[wordpress](https://registry.hub.docker.com/_/wordpress/) official -[Docker](https://www.docker.com/) images for this installation. (The -WordPress image includes an Apache server). - -Demonstrated Kubernetes Concepts: - -* [Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) to - define persistent disks (disk lifecycle not tied to the Pods). -* [Services](https://kubernetes.io/docs/concepts/services-networking/service/) to enable Pods to - locate one another. -* [External Load Balancers](https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer) - to expose Services externally. -* [Deployments](http://kubernetes.io/docs/user-guide/deployments/) to ensure Pods - stay up and running. -* [Secrets](http://kubernetes.io/docs/user-guide/secrets/) to store sensitive - passwords. 
- -## Quickstart - -Put your desired MySQL password in a file called `password.txt` with -no trailing newline. The first `tr` command will remove the newline if -your editor added one. - -**Note:** if your cluster enforces **_selinux_** and you will be using [Host Path](#host-path) for storage, then please follow this [extra step](#selinux). - -```shell -tr --delete '\n' .strippedpassword.txt && mv .strippedpassword.txt password.txt -kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/mysql-wordpress-pd/local-volumes.yaml -kubectl create secret generic mysql-pass --from-file=password.txt -kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/mysql-wordpress-pd/mysql-deployment.yaml -kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/mysql-wordpress-pd/wordpress-deployment.yaml -``` - -## Table of Contents - - - -- [Persistent Installation of MySQL and WordPress on Kubernetes](#persistent-installation-of-mysql-and-wordpress-on-kubernetes) - - [Quickstart](#quickstart) - - [Table of Contents](#table-of-contents) - - [Cluster Requirements](#cluster-requirements) - - [Decide where you will store your data](#decide-where-you-will-store-your-data) - - [Host Path](#host-path) - - [SELinux](#selinux) - - [GCE Persistent Disk](#gce-persistent-disk) - - [Create the MySQL Password Secret](#create-the-mysql-password-secret) - - [Deploy MySQL](#deploy-mysql) - - [Deploy WordPress](#deploy-wordpress) - - [Visit your new WordPress blog](#visit-your-new-wordpress-blog) - - [Take down and restart your blog](#take-down-and-restart-your-blog) - - [Next Steps](#next-steps) - - - -## Cluster Requirements - -Kubernetes runs in a variety of environments and is inherently -modular. Not all clusters are the same. These are the requirements for -this example. - -* Kubernetes version 1.2 is required due to using newer features, such - at PV Claims and Deployments. 
Run `kubectl version` to see your - cluster version. -* [Cluster DNS](https://github.com/kubernetes/dns) will be used for service discovery. -* An [external load balancer](https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer) - will be used to access WordPress. -* [Persistent Volume Claims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) - are used. You must create Persistent Volumes in your cluster to be - claimed. This example demonstrates how to create two types of - volumes, but any volume is sufficient. - -Consult a -[Getting Started Guide](http://kubernetes.io/docs/getting-started-guides/) -to set up a cluster and the -[kubectl](http://kubernetes.io/docs/user-guide/prereqs/) command-line client. - -## Decide where you will store your data - -MySQL and WordPress will each use a -[Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) -to store their data. We will use a Persistent Volume Claim to claim an -available persistent volume. This example covers HostPath and -GCEPersistentDisk volumes. Choose one of the two, or see -[Types of Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#types-of-persistent-volumes) -for more options. - -### Host Path - -Host paths are volumes mapped to directories on the host. **These -should be used for testing or single-node clusters only**. The data -will not be moved between nodes if the pod is recreated on a new -node. If the pod is deleted and recreated on a new node, data will be -lost. - -##### SELinux - -On systems supporting selinux it is preferred to leave it enabled/enforcing. -However, docker containers mount the host path with the "_svirt_sandbox_file_t_" -label type, which is incompatible with the default label type for /tmp ("_tmp_t_"), -resulting in a permissions error when the mysql container attempts to `chown` -_/var/lib/mysql_. 
-Therefore, on selinx systems using host path, you should pre-create the host path -directory (/tmp/data/) and change it's selinux label type to "_svirt_sandbox_file_t_", -as follows: - -```shell -## on every node: -mkdir -p /tmp/data -chmod a+rwt /tmp/data # match /tmp permissions -chcon -Rt svirt_sandbox_file_t /tmp/data -``` - -Continuing with host path, create the persistent volume objects in Kubernetes using -[local-volumes.yaml](local-volumes.yaml): - -```shell -export KUBE_REPO=https://raw.githubusercontent.com/kubernetes/kubernetes/master -kubectl create -f $KUBE_REPO/examples/mysql-wordpress-pd/local-volumes.yaml -``` - - -### GCE Persistent Disk - -This storage option is applicable if you are running on -[Google Compute Engine](http://kubernetes.io/docs/getting-started-guides/gce/). - -Create two persistent disks. You will need to create the disks in the -same [GCE zone](https://cloud.google.com/compute/docs/zones) as the -Kubernetes cluster. The default setup script will create the cluster -in the `us-central1-b` zone, as seen in the -[config-default.sh](../../cluster/gce/config-default.sh) file. Replace -`` below with the appropriate zone. The names `wordpress-1` and -`wordpress-2` must match the `pdName` fields we have specified in -[gce-volumes.yaml](gce-volumes.yaml). - -```shell -gcloud compute disks create --size=20GB --zone= wordpress-1 -gcloud compute disks create --size=20GB --zone= wordpress-2 -``` - -Create the persistent volume objects in Kubernetes for those disks: - -```shell -export KUBE_REPO=https://raw.githubusercontent.com/kubernetes/kubernetes/master -kubectl create -f $KUBE_REPO/examples/mysql-wordpress-pd/gce-volumes.yaml -``` - -## Create the MySQL Password Secret - -Use a [Secret](http://kubernetes.io/docs/user-guide/secrets/) object -to store the MySQL password. First create a file (in the same directory -as the wordpress sample files) called -`password.txt` and save your password in it. 
Make sure to not have a -trailing newline at the end of the password. The first `tr` command -will remove the newline if your editor added one. Then, create the -Secret object. - -```shell -tr --delete '\n' .strippedpassword.txt && mv .strippedpassword.txt password.txt -kubectl create secret generic mysql-pass --from-file=password.txt -``` - -This secret is referenced by the MySQL and WordPress pod configuration -so that those pods will have access to it. The MySQL pod will set the -database password, and the WordPress pod will use the password to -access the database. - -## Deploy MySQL - -Now that the persistent disks and secrets are defined, the Kubernetes -pods can be launched. Start MySQL using -[mysql-deployment.yaml](mysql-deployment.yaml). - -```shell -kubectl create -f $KUBE_REPO/examples/mysql-wordpress-pd/mysql-deployment.yaml -``` - -Take a look at [mysql-deployment.yaml](mysql-deployment.yaml), and -note that we've defined a volume mount for `/var/lib/mysql`, and then -created a Persistent Volume Claim that looks for a 20G volume. This -claim is satisfied by any volume that meets the requirements, in our -case one of the volumes we created above. - -Also look at the `env` section and see that we specified the password -by referencing the secret `mysql-pass` that we created above. Secrets -can have multiple key:value pairs. Ours has only one key -`password.txt` which was the name of the file we used to create the -secret. The [MySQL image](https://hub.docker.com/_/mysql/) sets the -database password using the `MYSQL_ROOT_PASSWORD` environment -variable. - -It may take a short period before the new pod reaches the `Running` -state. List all pods to see the status of this new pod. - -```shell -kubectl get pods -``` - -``` -NAME READY STATUS RESTARTS AGE -wordpress-mysql-cqcf4-9q8lo 1/1 Running 0 1m -``` - -Kubernetes logs the stderr and stdout for each pod. Take a look at the -logs for a pod by using `kubectl log`. 
Copy the pod name from the -`get pods` command, and then: - -```shell -kubectl logs -``` - -``` -... -2016-02-19 16:58:05 1 [Note] InnoDB: 128 rollback segment(s) are active. -2016-02-19 16:58:05 1 [Note] InnoDB: Waiting for purge to start -2016-02-19 16:58:05 1 [Note] InnoDB: 5.6.29 started; log sequence number 1626007 -2016-02-19 16:58:05 1 [Note] Server hostname (bind-address): '*'; port: 3306 -2016-02-19 16:58:05 1 [Note] IPv6 is available. -2016-02-19 16:58:05 1 [Note] - '::' resolves to '::'; -2016-02-19 16:58:05 1 [Note] Server socket created on IP: '::'. -2016-02-19 16:58:05 1 [Warning] 'proxies_priv' entry '@ root@wordpress-mysql-cqcf4-9q8lo' ignored in --skip-name-resolve mode. -2016-02-19 16:58:05 1 [Note] Event Scheduler: Loaded 0 events -2016-02-19 16:58:05 1 [Note] mysqld: ready for connections. -Version: '5.6.29' socket: '/var/run/mysqld/mysqld.sock' port: 3306 MySQL Community Server (GPL) -``` - -Also in [mysql-deployment.yaml](mysql-deployment.yaml) we created a -service to allow other pods to reach this mysql instance. The name is -`wordpress-mysql` which resolves to the pod IP. 
- -Up to this point one Deployment, one Pod, one PVC, one Service, one Endpoint, -two PVs, and one Secret have been created, shown below: - -```shell -kubectl get deployment,pod,svc,endpoints,pvc -l app=wordpress -o wide && \ - kubectl get secret mysql-pass && \ - kubectl get pv -``` - -```shell -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -deploy/wordpress-mysql 1 1 1 1 3m -NAME READY STATUS RESTARTS AGE IP NODE -po/wordpress-mysql-3040864217-40soc 1/1 Running 0 3m 172.17.0.2 127.0.0.1 -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR -svc/wordpress-mysql None 3306/TCP 3m app=wordpress,tier=mysql -NAME ENDPOINTS AGE -ep/wordpress-mysql 172.17.0.2:3306 3m -NAME STATUS VOLUME CAPACITY ACCESSMODES AGE -pvc/mysql-pv-claim Bound local-pv-2 20Gi RWO 3m -NAME TYPE DATA AGE -mysql-pass Opaque 1 3m -NAME CAPACITY ACCESSMODES STATUS CLAIM REASON AGE -local-pv-1 20Gi RWO Available 3m -local-pv-2 20Gi RWO Bound default/mysql-pv-claim 3m -``` - -## Deploy WordPress - -Next deploy WordPress using -[wordpress-deployment.yaml](wordpress-deployment.yaml): - -```shell -kubectl create -f $KUBE_REPO/examples/mysql-wordpress-pd/wordpress-deployment.yaml -``` - -Here we are using many of the same features, such as a volume claim -for persistent storage and a secret for the password. - -The [WordPress image](https://hub.docker.com/_/wordpress/) accepts the -database hostname through the environment variable -`WORDPRESS_DB_HOST`. We set the env value to the name of the MySQL -service we created: `wordpress-mysql`. - -The WordPress service has the setting `type: LoadBalancer`. This will -set up the wordpress service behind an external IP. - -Find the external IP for your WordPress service. 
**It may take a minute -to have an external IP assigned to the service, depending on your -cluster environment.** - -```shell -kubectl get services wordpress -``` - -``` -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -wordpress 10.0.0.5 1.2.3.4 80/TCP 19h -``` - -## Visit your new WordPress blog - -Now, we can visit the running WordPress app. Use the external IP of -the service that you obtained above. - -``` -http:// -``` - -You should see the familiar WordPress init page. - -![WordPress init page](WordPress.png "WordPress init page") - -> Warning: Do not leave your WordPress installation on this page. If -> it is found by another user, they can set up a website on your -> instance and use it to serve potentially malicious content. You -> should either continue with the installation past the point at which -> you create your username and password, delete your instance, or set -> up a firewall to restrict access. - -## Take down and restart your blog - -Set up your WordPress blog and play around with it a bit. Then, take -down its pods and bring them back up again. Because you used -persistent disks, your blog state will be preserved. - -All of the resources are labeled with `app=wordpress`, so you can -easily bring them down using a label selector: - -```shell -kubectl delete deployment,service -l app=wordpress -kubectl delete secret mysql-pass -``` - -Later, re-creating the resources with the original commands will pick -up the original disks with all your data intact. Because we did not -delete the PV Claims, no other pods in the cluster could claim them -after we deleted our pods. Keeping the PV Claims also ensured -recreating the Pods did not cause the PD to switch Pods. 
- -If you are ready to release your persistent volumes and the data on them, run: - -```shell -kubectl delete pvc -l app=wordpress -``` - -And then delete the volume objects themselves: - -```shell -kubectl delete pv local-pv-1 local-pv-2 -``` - -or - -```shell -kubectl delete pv wordpress-pv-1 wordpress-pv-2 -``` - -## Next Steps - -* [Introspection and Debugging](http://kubernetes.io/docs/user-guide/introspection-and-debugging/) -* [Jobs](http://kubernetes.io/docs/user-guide/jobs/) may be useful to run SQL queries. -* [Exec](http://kubernetes.io/docs/user-guide/getting-into-containers/) -* [Port Forwarding](http://kubernetes.io/docs/user-guide/connecting-to-applications-port-forward/) - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/mysql-wordpress-pd/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/mysql-wordpress-pd/README.md](https://github.com/kubernetes/examples/blob/master/mysql-wordpress-pd/README.md) diff --git a/examples/newrelic/README.md b/examples/newrelic/README.md index 2a646bdb6f0..e5c429c7665 100644 --- a/examples/newrelic/README.md +++ b/examples/newrelic/README.md @@ -1,157 +1 @@ -## New Relic Server Monitoring Agent Example - -This example shows how to run a New Relic server monitoring agent as a pod in a DaemonSet on an existing Kubernetes cluster. - -This example will create a DaemonSet which places the New Relic monitoring agent on every node in the cluster. It's also fairly trivial to exclude specific Kubernetes nodes from the DaemonSet to just monitor specific servers. - -### Step 0: Prerequisites - -This process will create privileged containers which have full access to the host system for logging. Beware of the security implications of this. - -If you are using a Salt based KUBERNETES\_PROVIDER (**gce**, **vagrant**, **aws**), you should make sure the creation of privileged containers via the API is enabled. 
Check `cluster/saltbase/pillar/privilege.sls`. - -DaemonSets must be enabled on your cluster. Instructions for enabling DaemonSet can be found [here](https://kubernetes.io/docs/api.md#enabling-the-extensions-group). - -### Step 1: Configure New Relic Agent - -The New Relic agent is configured via environment variables. We will configure these environment variables in a sourced bash script, encode the environment file data, and store it in a secret which will be loaded at container runtime. - -The [New Relic Linux Server configuration page] -(https://docs.newrelic.com/docs/servers/new-relic-servers-linux/installation-configuration/configuring-servers-linux) lists all the other settings for nrsysmond. - -To create an environment variable for a setting, prepend NRSYSMOND_ to its name. For example, - -```console -loglevel=debug -``` - -translates to - -```console -NRSYSMOND_loglevel=debug -``` - -Edit examples/newrelic/nrconfig.env and set up the environment variables for your NewRelic agent. Be sure to edit the license key field and fill in your own New Relic license key. - -Now, let's vendor the config into a secret. - -```console -$ cd examples/newrelic/ -$ ./config-to-secret.sh -``` - - - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: newrelic-config -type: Opaque -data: - config: {{config_data}} -``` - -[Download example](newrelic-config-template.yaml?raw=true) - - -The script will encode the config file and write it to `newrelic-config.yaml`. - -Finally, submit the config to the cluster: - -```console -$ kubectl create -f examples/newrelic/newrelic-config.yaml -``` - -### Step 2: Create the DaemonSet definition. - -The DaemonSet definition instructs Kubernetes to place a newrelic sysmond agent on each Kubernetes node. 
- - - -```yaml -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: newrelic-agent - labels: - tier: monitoring - app: newrelic-agent - version: v1 -spec: - template: - metadata: - labels: - name: newrelic - spec: - # Filter to specific nodes: - # nodeSelector: - # app: newrelic - hostPID: true - hostIPC: true - hostNetwork: true - containers: - - resources: - requests: - cpu: 0.15 - securityContext: - privileged: true - env: - - name: NRSYSMOND_logfile - value: "/var/log/nrsysmond.log" - image: newrelic/nrsysmond - name: newrelic - command: [ "bash", "-c", "source /etc/kube-newrelic/config && /usr/sbin/nrsysmond -E -F" ] - volumeMounts: - - name: newrelic-config - mountPath: /etc/kube-newrelic - readOnly: true - - name: dev - mountPath: /dev - - name: run - mountPath: /var/run/docker.sock - - name: sys - mountPath: /sys - - name: log - mountPath: /var/log - volumes: - - name: newrelic-config - secret: - secretName: newrelic-config - - name: dev - hostPath: - path: /dev - - name: run - hostPath: - path: /var/run/docker.sock - - name: sys - hostPath: - path: /sys - - name: log - hostPath: - path: /var/log -``` - -[Download example](newrelic-daemonset.yaml?raw=true) - - -The daemonset instructs Kubernetes to spawn pods on each node, mapping /dev/, /run/, /sys/, and /var/log to the container. It also maps the secrets we set up earlier to /etc/kube-newrelic/config, and sources them in the startup script, configuring the agent properly. - -#### DaemonSet customization - -- To include a custom hostname prefix (or other per-container environment variables that can be generated at run-time), you can modify the DaemonSet `command` value: - -``` -command: [ "bash", "-c", "source /etc/kube-newrelic/config && export NRSYSMOND_hostname=mycluster-$(hostname) && /usr/sbin/nrsysmond -E -F" ] -``` - -When the New Relic agent starts, `NRSYSMOND_hostname` is set using the output of `hostname` with `mycluster` prepended. 
- - -### Known issues - -It's a bit cludgy to define the environment variables like we do here in these config files. There is [another issue](https://github.com/kubernetes/kubernetes/issues/4710) to discuss adding mapping secrets to environment variables in Kubernetes. - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/newrelic/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/newrelic/README.md](https://github.com/kubernetes/examples/blob/master/staging/newrelic/README.md) diff --git a/examples/nodesjs-mongodb/README.md b/examples/nodesjs-mongodb/README.md index ac564666773..7f84108b399 100644 --- a/examples/nodesjs-mongodb/README.md +++ b/examples/nodesjs-mongodb/README.md @@ -1,282 +1 @@ -## Node.js and MongoDB on Kubernetes - -The following document describes the deployment of a basic Node.js and MongoDB web stack on Kubernetes. Currently this example does not use replica sets for MongoDB. - -For more a in-depth explanation of this example, please [read this post.](https://medium.com/google-cloud-platform-developer-advocates/running-a-mean-stack-on-google-cloud-platform-with-kubernetes-149ca81c2b5d) - -### Prerequisites - -This example assumes that you have a basic understanding of Kubernetes conecepts (Pods, Services, Replication Controllers), a Kubernetes cluster up and running, and that you have installed the ```kubectl``` command line tool somewhere in your path. Please see the [getting started](https://kubernetes.io/docs/getting-started-guides/) for installation instructions for your platform. - -Note: This example was tested on [Google Container Engine](https://cloud.google.com/container-engine/docs/). Some optional commands require the [Google Cloud SDK](https://cloud.google.com/sdk/). - -### Creating the MongoDB Service - -The first thing to do is create the MongoDB Service. 
This service is used by the other Pods in the cluster to find and connect to the MongoDB instance. - -```yaml -apiVersion: v1 -kind: Service -metadata: - labels: - name: mongo - name: mongo -spec: - ports: - - port: 27017 - targetPort: 27017 - selector: - name: mongo -``` - -[Download file](mongo-service.yaml) - -This service looks for all pods with the "mongo" tag, and creates a Service on port 27017 that targets port 27017 on the MongoDB pods. Port 27017 is the standard MongoDB port. - -To start the service, run: - -```sh -kubectl create -f examples/nodesjs-mongodb/mongo-service.yaml -``` - -### Creating the MongoDB Controller - -Next, create the MongoDB instance that runs the Database. Databases also need persistent storage, which will be different for each platform. - -```yaml -apiVersion: v1 -kind: ReplicationController -metadata: - labels: - name: mongo - name: mongo-controller -spec: - replicas: 1 - template: - metadata: - labels: - name: mongo - spec: - containers: - - image: mongo - name: mongo - ports: - - name: mongo - containerPort: 27017 - hostPort: 27017 - volumeMounts: - - name: mongo-persistent-storage - mountPath: /data/db - volumes: - - name: mongo-persistent-storage - gcePersistentDisk: - pdName: mongo-disk - fsType: ext4 -``` - -[Download file](mongo-controller.yaml) - -Looking at this file from the bottom up: - -First, it creates a volume called "mongo-persistent-storage." - -In the above example, it is using a "gcePersistentDisk" to back the storage. This is only applicable if you are running your Kubernetes cluster in Google Cloud Platform. 
- -If you don't already have a [Google Persistent Disk](https://cloud.google.com/compute/docs/disks) created in the same zone as your cluster, create a new disk in the same Google Compute Engine / Container Engine zone as your cluster with this command: - -```sh -gcloud compute disks create --size=200GB --zone=$ZONE mongo-disk -``` - -If you are using AWS, replace the "volumes" section with this (untested): - -```yaml - volumes: - - name: mongo-persistent-storage - awsElasticBlockStore: - volumeID: aws://{region}/{volume ID} - fsType: ext4 -``` - -If you don't have a EBS volume in the same region as your cluster, create a new EBS volume in the same region with this command (untested): - -```sh -ec2-create-volume --size 200 --region $REGION --availability-zone $ZONE -``` - -This command will return a volume ID to use. - -For other storage options (iSCSI, NFS, OpenStack), please follow the documentation. - -Now that the volume is created and usable by Kubernetes, the next step is to create the Pod. - -Looking at the container section: It uses the official MongoDB container, names itself "mongo", opens up port 27017, and mounts the disk to "/data/db" (where the mongo container expects the data to be). - -Now looking at the rest of the file, it is creating a Replication Controller with one replica, called mongo-controller. It is important to use a Replication Controller and not just a Pod, as a Replication Controller will restart the instance in case it crashes. - -Create this controller with this command: - -```sh -kubectl create -f examples/nodesjs-mongodb/mongo-controller.yaml -``` - -At this point, MongoDB is up and running. - -Note: There is no password protection or auth running on the database by default. Please keep this in mind! - -### Creating the Node.js Service - -The next step is to create the Node.js service. This service is what will be the endpoint for the web site, and will load balance requests to the Node.js instances. 
- -```yaml -apiVersion: v1 -kind: Service -metadata: - name: web - labels: - name: web -spec: - type: LoadBalancer - ports: - - port: 80 - targetPort: 3000 - protocol: TCP - selector: - name: web -``` - -[Download file](web-service.yaml) - -This service is called "web," and it uses a [LoadBalancer](https://kubernetes.io/docs/user-guide/services.md#type-loadbalancer) to distribute traffic on port 80 to port 3000 running on Pods with the "web" tag. Port 80 is the standard HTTP port, and port 3000 is the standard Node.js port. - -On Google Container Engine, a [network load balancer](https://cloud.google.com/compute/docs/load-balancing/network/) and [firewall rule](https://cloud.google.com/compute/docs/networking#addingafirewall) to allow traffic are automatically created. - -To start the service, run: - -```sh -kubectl create -f examples/nodesjs-mongodb/web-service.yaml -``` - -If you are running on a platform that does not support LoadBalancer (i.e Bare Metal), you need to use a [NodePort](https://kubernetes.io/docs/user-guide/services.md#type-nodeport) with your own load balancer. - -You may also need to open appropriate Firewall ports to allow traffic. - -### Creating the Node.js Controller - -The final step is deploying the Node.js container that will run the application code. This container can easily by replaced by any other web serving frontend, such as Rails, LAMP, Java, Go, etc. - -The most important thing to keep in mind is how to access the MongoDB service. 
- -If you were running MongoDB and Node.js on the same server, you would access MongoDB like so: - -```javascript -MongoClient.connect('mongodb://localhost:27017/database-name', function(err, db) { console.log(db); }); -``` - -With this Kubernetes setup, that line of code would become: - -```javascript -MongoClient.connect('mongodb://mongo:27017/database-name', function(err, db) { console.log(db); }); -``` - -The MongoDB Service previously created tells Kubernetes to configure the cluster so 'mongo' points to the MongoDB instance created earlier. - -#### Custom Container - -You should have your own container that runs your Node.js code hosted in a container registry. - -See [this example](https://medium.com/google-cloud-platform-developer-advocates/running-a-mean-stack-on-google-cloud-platform-with-kubernetes-149ca81c2b5d#8edc) to see how to make your own Node.js container. - -Once you have created your container, create the web controller. - -```yaml -apiVersion: v1 -kind: ReplicationController -metadata: - labels: - name: web - name: web-controller -spec: - replicas: 2 - selector: - name: web - template: - metadata: - labels: - name: web - spec: - containers: - - image: - name: web - ports: - - containerPort: 3000 - name: http-server -``` - -[Download file](web-controller.yaml) - -Replace with the url of your container. - -This Controller will create two replicas of the Node.js container, and each Node.js container will have the tag "web" and expose port 3000. The Service LoadBalancer will forward port 80 traffic to port 3000 automatically, along with load balancing traffic between the two instances. - -To start the Controller, run: - -```sh -kubectl create -f examples/nodesjs-mongodb/web-controller.yaml -``` - -#### Demo Container - -If you DON'T want to create a custom container, you can use the following YAML file: - -Note: You cannot run both Controllers at the same time, as they both try to control the same Pods. 
- -```yaml -apiVersion: v1 -kind: ReplicationController -metadata: - labels: - name: web - name: web-controller -spec: - replicas: 2 - selector: - name: web - template: - metadata: - labels: - name: web - spec: - containers: - - image: node:0.10.40 - command: ['/bin/sh', '-c'] - args: ['cd /home && git clone https://github.com/ijason/NodeJS-Sample-App.git demo && cd demo/EmployeeDB/ && npm install && sed -i -- ''s/localhost/mongo/g'' app.js && node app.js'] - name: web - ports: - - containerPort: 3000 - name: http-server -``` - -[Download file](web-controller-demo.yaml) - -This will use the default Node.js container, and will pull and execute code at run time. This is not recommended; typically, your code should be part of the container. - -To start the Controller, run: - -```sh -kubectl create -f examples/nodesjs-mongodb/web-controller-demo.yaml -``` - -### Testing it out - -Now that all the components are running, visit the IP address of the load balancer to access the website. - -With Google Cloud Platform, get the IP address of all load balancers with the following command: - -```sh -gcloud compute forwarding-rules list -``` - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/nodesjs-mongodb/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/nodesjs-mongodb/README.md](https://github.com/kubernetes/examples/blob/master/staging/nodesjs-mongodb/README.md) diff --git a/examples/oms/README.md b/examples/oms/README.md index 477b53c88c6..096e03af232 100644 --- a/examples/oms/README.md +++ b/examples/oms/README.md @@ -1,61 +1 @@ -# Microsoft Operations Management Suite (OMS) Container Monitoring Example - -The [Microsoft Operations Management Suite (OMS)](https://www.microsoft.com/en-us/cloud-platform/operations-management-suite) is a software-as-a-service offering from Microsoft that allows Enterprise IT to manage any hybrid cloud. 
- -This example will create a DaemonSet to deploy the OMS Linux agents running as containers to every node in the Kubernetes cluster. - -### Supported Linux Operating Systems & Docker -- Docker 1.10 thru 1.12.1 - -- An x64 version of the following: - - Ubuntu 14.04 LTS, 16.04 LTS - - CoreOS (stable) - - Amazon Linux 2016.09.0 - - openSUSE 13.2 - - CentOS 7 - - SLES 12 - - RHEL 7.2 - -## Step 1 - -If you already have a Microsoft Azure account, you can quickly create a free OMS account by following the steps [here](https://docs.microsoft.com/en-us/azure/log-analytics/log-analytics-get-started#sign-up-quickly-using-microsoft-azure). - -If you don't have a Microsoft Azure account, you can create a free OMS account by following the guide [here](https://docs.microsoft.com/en-us/azure/log-analytics/log-analytics-get-started#sign-up-in-3-steps-using-oms). - -## Step 2 - -You will need to edit the [omsagent-daemonset.yaml](./omsagent-daemonset.yaml) file to add your Workspace ID and Primary Key of your OMS account. - -``` -- env: - - name: WSID - value: - - name: KEY - value: -``` - -The Workspace ID and Primary Key can be found inside the OMS Portal under Settings in the connected sources tab (see below screenshot). -![connected-resources](./images/connected-resources.png) - -## Step 3 - -Run the following command to deploy the OMS agent to your Kubernetes nodes: - -``` -kubectl -f omsagent-daemonset.yaml -``` - -## Step 4 - -Add the Container solution to your OMS workspace: - -1. Log in to the OMS portal. -2. Click the Solutions Gallery tile. -3. On the OMS Solutions Gallery page, click on Containers. -4. On the page for the Containers solution, detailed information about the solution is displayed. Click Add. - -A new tile for the Container solution that you added appears on the Overview page in OMS. It would take 5 minutes for your data to appear in OMS. 
- -![oms-portal](./images/oms-portal.png) - -![coms-container-solution](./images/oms-container-solution.png) \ No newline at end of file +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/oms/README.md](https://github.com/kubernetes/examples/blob/master/staging/oms/README.md) diff --git a/examples/openshift-origin/README.md b/examples/openshift-origin/README.md index 34b24351805..c01effb2b06 100644 --- a/examples/openshift-origin/README.md +++ b/examples/openshift-origin/README.md @@ -1,211 +1 @@ -## OpenShift Origin example - -This example shows how to run OpenShift Origin as a pod on an existing Kubernetes cluster. - -OpenShift Origin runs with a rich set of role based policy rules out of the box that requires authentication from users via certificates. When run as a pod on an existing Kubernetes cluster, it proxies access to the underlying Kubernetes services to provide security. - -As a result, this example is a complex end-to-end configuration that shows how to configure certificates for a service that runs on Kubernetes, and requires a number of configuration files to be injected dynamically via a secret volume to the pod. - -This example will create a pod running the OpenShift Origin master. In addition, it will run a three-pod etcd setup to hold OpenShift content. OpenShift embeds Kubernetes in the stand-alone setup, so the configuration for OpenShift when it is running against an external Kubernetes cluster is different: content specific to Kubernetes will be stored in the Kubernetes etcd repository (i.e. pods, services, replication controllers, etc.), but OpenShift specific content (builds, images, users, policies, etc.) are stored in its etcd setup. - -### Step 0: Prerequisites - -This example assumes that you have an understanding of Kubernetes and that you have forked the repository. - -OpenShift Origin creates privileged containers when running Docker builds during the source-to-image process. 
- -If you are using a Salt based KUBERNETES_PROVIDER (**gce**, **vagrant**, **aws**), you should enable the -ability to create privileged containers via the API. - -```sh -$ cd kubernetes -$ vi cluster/saltbase/pillar/privilege.sls - -# If true, allow privileged containers to be created by API -allow_privileged: true -``` - -Now spin up a cluster using your preferred KUBERNETES_PROVIDER. Remember that `kube-up.sh` may start other pods on your nodes, so ensure that you have enough resources to run the five pods for this example. - - -```sh -$ export KUBERNETES_PROVIDER=${YOUR_PROVIDER} -$ cluster/kube-up.sh -``` - -Next, let's setup some variables, and create a local folder that will hold generated configuration files. - -```sh -$ export OPENSHIFT_EXAMPLE=$(pwd)/examples/openshift-origin -$ export OPENSHIFT_CONFIG=${OPENSHIFT_EXAMPLE}/config -$ mkdir ${OPENSHIFT_CONFIG} - -$ export ETCD_INITIAL_CLUSTER_TOKEN=$(python -c "import string; import random; print(''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(40)))") -$ export ETCD_DISCOVERY_TOKEN=$(python -c "import string; import random; print(\"etcd-cluster-\" + ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(5)))") -$ sed -i.bak -e "s/INSERT_ETCD_INITIAL_CLUSTER_TOKEN/\"${ETCD_INITIAL_CLUSTER_TOKEN}\"/g" -e "s/INSERT_ETCD_DISCOVERY_TOKEN/\"${ETCD_DISCOVERY_TOKEN}\"/g" ${OPENSHIFT_EXAMPLE}/etcd-controller.yaml -``` - -This will have created a `etcd-controller.yaml.bak` file in your directory, which you should remember to restore when doing cleanup (or use the given `cleanup.sh`). 
Finally, let's start up the external etcd pods and the discovery service necessary for their initialization: - -```sh -$ kubectl create -f examples/openshift-origin/openshift-origin-namespace.yaml -$ kubectl create -f examples/openshift-origin/etcd-discovery-controller.yaml --namespace="openshift-origin" -$ kubectl create -f examples/openshift-origin/etcd-discovery-service.yaml --namespace="openshift-origin" -$ kubectl create -f examples/openshift-origin/etcd-controller.yaml --namespace="openshift-origin" -$ kubectl create -f examples/openshift-origin/etcd-service.yaml --namespace="openshift-origin" -``` - -### Step 1: Export your Kubernetes configuration file for use by OpenShift pod - -OpenShift Origin uses a configuration file to know how to access your Kubernetes cluster with administrative authority. - -``` -$ cluster/kubectl.sh config view --output=yaml --flatten=true --minify=true > ${OPENSHIFT_CONFIG}/kubeconfig -``` - -The output from this command will contain a single file that has all the required information needed to connect to your Kubernetes cluster that you previously provisioned. This file should be considered sensitive, so do not share this file with untrusted parties. - -We will later use this file to tell OpenShift how to bootstrap its own configuration. - -### Step 2: Create an External Load Balancer to Route Traffic to OpenShift - -An external load balancer is needed to route traffic to our OpenShift master service that will run as a pod on your Kubernetes cluster. - - -```sh -$ cluster/kubectl.sh create -f $OPENSHIFT_EXAMPLE/openshift-service.yaml --namespace="openshift-origin" -``` - -### Step 3: Generate configuration file for your OpenShift master pod - -The OpenShift master requires a configuration file as input to know how to bootstrap the system. - -In order to build this configuration file, we need to know the public IP address of our external load balancer in order to build default certificates. 
- -Grab the public IP address of the service we previously created: the two-line script below will attempt to do so, but make sure to check that the IP was set as a result - if it was not, try again after a couple seconds. - - -```sh -$ export PUBLIC_OPENSHIFT_IP=$(kubectl get services openshift --namespace="openshift-origin" --template="{{ index .status.loadBalancer.ingress 0 \"ip\" }}") -$ echo ${PUBLIC_OPENSHIFT_IP} -``` - -You can automate the process with the following script, as it might take more than a minute for the IP to be set and discoverable. - -```shell -$ while [ ${#PUBLIC_OPENSHIFT_IP} -lt 1 ]; do - echo -n . - sleep 1 - { - export PUBLIC_OPENSHIFT_IP=$(kubectl get services openshift --namespace="openshift-origin" --template="{{ index .status.loadBalancer.ingress 0 \"ip\" }}") - } 2> ${OPENSHIFT_EXAMPLE}/openshift-startup.log - if [[ ! ${PUBLIC_OPENSHIFT_IP} =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then - export PUBLIC_OPENSHIFT_IP="" - fi - done -$ echo -$ echo "Public OpenShift IP set to: ${PUBLIC_OPENSHIFT_IP}" -``` - -Ensure you have a valid PUBLIC_IP address before continuing in the example. - -We now need to run a command on your host to generate a proper OpenShift configuration. To do this, we will volume mount the configuration directory that holds your Kubernetes kubeconfig file from the prior step. - - -```sh -$ docker run --privileged -v ${OPENSHIFT_CONFIG}:/config openshift/origin start master --write-config=/config --kubeconfig=/config/kubeconfig --master=https://localhost:8443 --public-master=https://${PUBLIC_OPENSHIFT_IP}:8443 --etcd=http://etcd:2379 -``` - -You should now see a number of certificates minted in your configuration directory, as well as a master-config.yaml file that tells the OpenShift master how to execute. We need to make some adjustments to this configuration directory in order to allow the OpenShift cluster to use Kubernetes serviceaccounts. 
First, write the Kubernetes service account key to the `${OPENSHIFT_CONFIG}` directory. The following script assumes you are using GCE. If you are not, use `scp` or `ssh` to get the key from the master node running Kubernetes. It is usually located at `/srv/kubernetes/server.key`. - -```shell -$ export ZONE=$(gcloud compute instances list | grep "${KUBE_GCE_INSTANCE_PREFIX}\-master" | awk '{print $2}' | head -1) -$ echo "sudo cat /srv/kubernetes/server.key; exit;" | gcloud compute ssh ${KUBE_GCE_INSTANCE_PREFIX}-master --zone ${ZONE} | grep -Ex "(^\-.*\-$|^\S+$)" > ${OPENSHIFT_CONFIG}/serviceaccounts.private.key - -``` - -Although we are retrieving the private key from the Kubernetes master, OpenShift will take care of the conversion for us so that serviceaccounts are created with the public key. Edit your `master-config.yaml` file in the `${OPENSHIFT_CONFIG}` directory to add `serviceaccounts.private.key` to the list of `publicKeyFiles`: - -```shell -$ sed -i -e 's/publicKeyFiles:.*$/publicKeyFiles:/g' -e '/publicKeyFiles:/a \ \ - serviceaccounts.private.key' ${OPENSHIFT_CONFIG}/master-config.yaml -``` - -Now, the configuration files are complete. In the next step, we will bundle the resulting configuration into a Kubernetes Secret that our OpenShift master pod will consume. - -### Step 4: Bundle the configuration into a Secret - -We now need to bundle the contents of our configuration into a secret for use by our OpenShift master pod. - -OpenShift includes an experimental command to make this easier. - -First, update the ownership for the files previously generated: - -``` -$ sudo -E chown -R ${USER} ${OPENSHIFT_CONFIG} -``` - -Then run the following command to collapse them into a Kubernetes secret. 
- -```sh -$ docker run -it --privileged -e="KUBECONFIG=/config/admin.kubeconfig" -v ${OPENSHIFT_CONFIG}:/config openshift/origin cli secrets new openshift-config /config -o json &> examples/openshift-origin/secret.json -``` - -Now, lets create the secret in your Kubernetes cluster. - -```sh -$ cluster/kubectl.sh create -f examples/openshift-origin/secret.json --namespace="openshift-origin" -``` - -**NOTE: This secret is secret and should not be shared with untrusted parties.** - -### Step 5: Deploy OpenShift Master - -We are now ready to deploy OpenShift. - -We will deploy a pod that runs the OpenShift master. The OpenShift master will delegate to the underlying Kubernetes -system to manage Kubernetes specific resources. For the sake of simplicity, the OpenShift master will run with an embedded etcd to hold OpenShift specific content. This demonstration will evolve in the future to show how to run etcd in a pod so that content is not destroyed if the OpenShift master fails. - -```sh -$ cluster/kubectl.sh create -f ${OPENSHIFT_EXAMPLE}/openshift-controller.yaml --namespace="openshift-origin" -``` - -You should now get a pod provisioned whose name begins with openshift. - -```sh -$ cluster/kubectl.sh get pods | grep openshift -$ cluster/kubectl.sh log openshift-t7147 origin -Running: cluster/../cluster/gce/../../cluster/../_output/dockerized/bin/linux/amd64/kubectl logs openshift-t7t47 origin -2015-04-30T15:26:00.454146869Z I0430 15:26:00.454005 1 start_master.go:296] Starting an OpenShift master, reachable at 0.0.0.0:8443 (etcd: [https://10.0.27.2:4001]) -2015-04-30T15:26:00.454231211Z I0430 15:26:00.454223 1 start_master.go:297] OpenShift master public address is https://104.197.73.241:8443 -``` - -Depending upon your cloud provider, you may need to open up an external firewall rule for tcp:8443. 
For GCE, you can run the following: - -```sh -$ gcloud compute --project "your-project" firewall-rules create "origin" --allow tcp:8443 --network "your-network" --source-ranges "0.0.0.0/0" -``` - -Consult your cloud provider's documentation for more information. - -Open a browser and visit the OpenShift master public address reported in your log. - -You can use the CLI commands by running the following: - -```sh -$ docker run --privileged --entrypoint="/usr/bin/bash" -it -e="OPENSHIFTCONFIG=/config/admin.kubeconfig" -v ${OPENSHIFT_CONFIG}:/config openshift/origin -$ osc config use-context public-default -$ osc --help -``` - -## Cleanup - -Clean up your cluster from resources created with this example: - -```sh -$ ${OPENSHIFT_EXAMPLE}/cleanup.sh -``` - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/openshift-origin/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/openshift-origin/README.md](https://github.com/kubernetes/examples/blob/master/staging/openshift-origin/README.md) diff --git a/examples/persistent-volume-provisioning/README.md b/examples/persistent-volume-provisioning/README.md index 4d3f00af648..df49ca3b5c1 100644 --- a/examples/persistent-volume-provisioning/README.md +++ b/examples/persistent-volume-provisioning/README.md @@ -1,521 +1 @@ -## Persistent Volume Provisioning - -This example shows how to use dynamic persistent volume provisioning. - -### Prerequisites - -This example assumes that you have an understanding of Kubernetes administration and can modify the -scripts that launch kube-controller-manager. - -### Admin Configuration - -The admin must define `StorageClass` objects that describe named "classes" of storage offered in a cluster. Different classes might map to arbitrary levels or policies determined by the admin. 
When configuring a `StorageClass` object for persistent volume provisioning, the admin will need to describe the type of provisioner to use and the parameters that will be used by the provisioner when it provisions a `PersistentVolume` belonging to the class. - -The name of a StorageClass object is significant, and is how users can request a particular class, by specifying the name in their `PersistentVolumeClaim`. The `provisioner` field must be specified as it determines what volume plugin is used for provisioning PVs. The `parameters` field contains the parameters that describe volumes belonging to the storage class. Different parameters may be accepted depending on the `provisioner`. For example, the value `io1`, for the parameter `type`, and the parameter `iopsPerGB` are specific to EBS . When a parameter is omitted, some default is used. - -See [Kubernetes StorageClass documentation](https://kubernetes.io/docs/user-guide/persistent-volumes/#storageclasses) for complete reference of all supported parameters. - -#### AWS - -```yaml -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: slow -provisioner: kubernetes.io/aws-ebs -parameters: - type: io1 - zones: us-east-1d, us-east-1c - iopsPerGB: "10" -``` - -* `type`: `io1`, `gp2`, `sc1`, `st1`. See AWS docs for details. Default: `gp2`. -* `zone`: AWS zone. If neither zone nor zones is specified, volumes are generally round-robin-ed across all active zones where Kubernetes cluster has a node. Note: zone and zones parameters must not be used at the same time. -* `zones`: a comma separated list of AWS zone(s). If neither zone nor zones is specified, volumes are generally round-robin-ed across all active zones where Kubernetes cluster has a node. Note: zone and zones parameters must not be used at the same time. -* `iopsPerGB`: only for `io1` volumes. I/O operations per second per GiB. 
AWS volume plugin multiplies this with size of requested volume to compute IOPS of the volume and caps it at 20 000 IOPS (maximum supported by AWS, see AWS docs). -* `encrypted`: denotes whether the EBS volume should be encrypted or not. Valid values are `true` or `false`. -* `kmsKeyId`: optional. The full Amazon Resource Name of the key to use when encrypting the volume. If none is supplied but `encrypted` is true, a key is generated by AWS. See AWS docs for valid ARN value. - -#### GCE - -```yaml -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: slow -provisioner: kubernetes.io/gce-pd -parameters: - type: pd-standard - zones: us-central1-a, us-central1-b -``` - -* `type`: `pd-standard` or `pd-ssd`. Default: `pd-ssd` -* `zone`: GCE zone. If neither zone nor zones is specified, volumes are generally round-robin-ed across all active zones where Kubernetes cluster has a node. Note: zone and zones parameters must not be used at the same time. -* `zones`: a comma separated list of GCE zone(s). If neither zone nor zones is specified, volumes are generally round-robin-ed across all active zones where Kubernetes cluster has a node. Note: zone and zones parameters must not be used at the same time. - -#### vSphere - -```yaml -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: slow -provisioner: kubernetes.io/vsphere-volume -parameters: - diskformat: eagerzeroedthick - fstype: ext3 -``` - -* `diskformat`: `thin`, `zeroedthick` and `eagerzeroedthick`. See vSphere docs for details. Default: `"thin"`. -* `fstype`: fstype that are supported by kubernetes. Default: `"ext4"`. 
- -#### Portworx Volume - -```yaml -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: portworx-io-priority-high -provisioner: kubernetes.io/portworx-volume -parameters: - repl: "1" - snap_interval: "70" - io_priority: "high" - -``` - -* `fs`: filesystem to be laid out: [none/xfs/ext4] (default: `ext4`) -* `block_size`: block size in Kbytes (default: `32`) -* `repl`: replication factor [1..3] (default: `1`) -* `io_priority`: IO Priority: [high/medium/low] (default: `low`) -* `snap_interval`: snapshot interval in minutes, 0 disables snaps (default: `0`) -* `aggregation_level`: specifies the number of chunks the volume would be distributed into, 0 indicates a non-aggregated volume (default: `0`) -* `ephemeral`: ephemeral storage [true/false] (default `false`) - -For a complete example refer ([Portworx Volume docs](../volumes/portworx/README.md)) - -#### StorageOS - -```yaml -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: sc-fast -provisioner: kubernetes.io/storageos -parameters: - pool: default - description: Kubernetes volume - fsType: ext4 - adminSecretNamespace: default - adminSecretName: storageos-secret -``` - -* `pool`: The name of the StorageOS distributed capacity pool to provision the volume from. Uses the `default` pool which is normally present if not specified. -* `description`: The description to assign to volumes that were created dynamically. All volume descriptions will be the same for the storage class, but different storage classes can be used to allow descriptions for different use cases. Defaults to `Kubernetes volume`. -* `fsType`: The default filesystem type to request. Note that user-defined rules within StorageOS may override this value. Defaults to `ext4`. -* `adminSecretNamespace`: The namespace where the API configuration secret is located. Required if adminSecretName set. -* `adminSecretName`: The name of the secret to use for obtaining the StorageOS API credentials. 
If not specified, default values will be attempted. - -For a complete example refer to the ([StorageOS example](../../volumes/storageos/README.md)) - -#### GLUSTERFS - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: slow -provisioner: kubernetes.io/glusterfs -parameters: - resturl: "http://127.0.0.1:8081" - clusterid: "630372ccdc720a92c681fb928f27b53f" - restuser: "admin" - secretNamespace: "default" - secretName: "heketi-secret" - gidMin: "40000" - gidMax: "50000" - volumetype: "replicate:3" -``` - -Example storageclass can be found in [glusterfs-storageclass.yaml](glusterfs/glusterfs-storageclass.yaml). - -* `resturl` : Gluster REST service/Heketi service url which provision gluster volumes on demand. The general format should be `IPaddress:Port` and this is a mandatory parameter for GlusterFS dynamic provisioner. If Heketi service is exposed as a routable service in openshift/kubernetes setup, this can have a format similar to -`http://heketi-storage-project.cloudapps.mystorage.com` where the fqdn is a resolvable heketi service url. - -* `restauthenabled` : Gluster REST service authentication boolean that enables authentication to the REST server. If this value is 'true', `restuser` and `restuserkey` or `secretNamespace` + `secretName` have to be filled. This option is deprecated, authentication is enabled when any of `restuser`, `restuserkey`, `secretName` or `secretNamespace` is specified. - -* `restuser` : Gluster REST service/Heketi user who has access to create volumes in the Gluster Trusted Pool. - -* `restuserkey` : Gluster REST service/Heketi user's password which will be used for authentication to the REST server. This parameter is deprecated in favor of `secretNamespace` + `secretName`. - -* `secretNamespace` + `secretName` : Identification of Secret instance that contains user password to use when talking to Gluster REST service. 
These parameters are optional, empty password will be used when both `secretNamespace` and `secretName` are omitted. The provided secret must have type "kubernetes.io/glusterfs". -When both `restuserkey` and `secretNamespace` + `secretName` is specified, the secret will be used. - -* `clusterid`: `630372ccdc720a92c681fb928f27b53f` is the ID of the cluster which will be used by Heketi when provisioning the volume. It can also be a list of clusterids, for ex: -"8452344e2becec931ece4e33c4674e4e,42982310de6c63381718ccfa6d8cf397". This is an optional parameter. - -Example of a secret can be found in [glusterfs-secret.yaml](glusterfs/glusterfs-secret.yaml). - -* `gidMin` + `gidMax` : The minimum and maximum value of GID range for the storage class. A unique value (GID) in this range ( gidMin-gidMax ) will be used for dynamically provisioned volumes. These are optional values. If not specified, the volume will be provisioned with a value between 2000-2147483647 which are defaults for gidMin and gidMax respectively. - -* `volumetype` : The volume type and its parameters can be configured with this optional value. If the volume type is not mentioned, it's up to the provisioner to decide the volume type. -For example: - - 'Replica volume': - `volumetype: replicate:3` where '3' is replica count. - 'Disperse/EC volume': - `volumetype: disperse:4:2` where '4' is data and '2' is the redundancy count. 
- 'Distribute volume': - `volumetype: none` - -For available volume types and its administration options refer: ([Administration Guide](https://access.redhat.com/documentation/en-US/Red_Hat_Storage/3.1/html/Administration_Guide/part-Overview.html)) - -Reference : ([How to configure Gluster on Kubernetes](https://github.com/gluster/gluster-kubernetes/blob/master/docs/setup-guide.md)) - -Reference : ([How to configure Heketi](https://github.com/heketi/heketi/wiki/Setting-up-the-topology)) - -When the persistent volumes are dynamically provisioned, the Gluster plugin automatically create an endpoint and a headless service in the name `gluster-dynamic-`. This dynamic endpoint and service will be deleted automatically when the persistent volume claim is deleted. - - -#### OpenStack Cinder - -```yaml -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: gold -provisioner: kubernetes.io/cinder -parameters: - type: fast - availability: nova -``` - -* `type`: [VolumeType](http://docs.openstack.org/admin-guide/dashboard-manage-volumes.html) created in Cinder. Default is empty. -* `availability`: Availability Zone. Default is empty. - -#### Ceph RBD - -```yaml - apiVersion: storage.k8s.io/v1 - kind: StorageClass - metadata: - name: fast - provisioner: kubernetes.io/rbd - parameters: - monitors: 10.16.153.105:6789 - adminId: kube - adminSecretName: ceph-secret - adminSecretNamespace: kube-system - pool: kube - userId: kube - userSecretName: ceph-secret-user - imageFormat: "1" -``` - -* `monitors`: Ceph monitors, comma delimited. It is required. -* `adminId`: Ceph client ID that is capable of creating images in the pool. Default is "admin". -* `adminSecret`: Secret Name for `adminId`. It is required. The provided secret must have type "kubernetes.io/rbd". -* `adminSecretNamespace`: The namespace for `adminSecret`. Default is "default". -* `pool`: Ceph RBD pool. Default is "rbd". -* `userId`: Ceph client ID that is used to map the RBD image. 
Default is the same as `adminId`. -* `userSecretName`: The name of Ceph Secret for `userId` to map RBD image. It must exist in the same namespace as PVCs. It is required. -* `imageFormat`: Ceph RBD image format, "1" or "2". Default is "1". -* `imageFeatures`: Ceph RBD image format 2 features, comma delimited. This is optional, and only be used if you set `imageFormat` to "2". Currently supported features are `layering` only. Default is "", no features is turned on. - -NOTE: We cannot turn on `exclusive-lock` feature for now (and `object-map`, `fast-diff`, `journaling` which require `exclusive-lock`), because exclusive lock and advisory lock cannot work together. (See [#45805](https://issue.k8s.io/45805)) - -#### Quobyte - - - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: slow -provisioner: kubernetes.io/quobyte -parameters: - quobyteAPIServer: "http://138.68.74.142:7860" - registry: "138.68.74.142:7861" - adminSecretName: "quobyte-admin-secret" - adminSecretNamespace: "kube-system" - user: "root" - group: "root" - quobyteConfig: "BASE" - quobyteTenant: "DEFAULT" -``` - -[Download example](quobyte/quobyte-storage-class.yaml?raw=true) - - -* **quobyteAPIServer** API Server of Quobyte in the format http(s)://api-server:7860 -* **registry** Quobyte registry to use to mount the volume. You can specify the registry as : pair or if you want to specify multiple registries you just have to put a comma between them e.q. :,:,:. The host can be an IP address or if you have a working DNS you can also provide the DNS names. -* **adminSecretName** secret that holds information about the Quobyte user and the password to authenticate against the API server. The provided secret must have type "kubernetes.io/quobyte". -* **adminSecretNamespace** The namespace for **adminSecretName**. Default is `default`. -* **user** maps all access to this user. Default is `root`. -* **group** maps all access to this group. Default is `nfsnobody`. 
-* **quobyteConfig** use the specified configuration to create the volume. You can create a new configuration or modify an existing one with the Web console or the quobyte CLI. Default is `BASE` -* **quobyteTenant** use the specified tenant ID to create/delete the volume. This Quobyte tenant has to be already present in Quobyte. For Quobyte < 1.4 use an empty string `""` as `DEFAULT` tenant. Default is `DEFAULT` - -First create Quobyte admin's Secret in the system namespace. Here the Secret is created in `kube-system`: - -``` -$ kubectl create -f examples/persistent-volume-provisioning/quobyte/quobyte-admin-secret.yaml --namespace=kube-system -``` - -Then create the Quobyte storage class: - -``` -$ kubectl create -f examples/persistent-volume-provisioning/quobyte/quobyte-storage-class.yaml -``` - -Now create a PVC - -``` -$ kubectl create -f examples/persistent-volume-provisioning/claim1.json -``` - -Check the created PVC: - -``` -$ kubectl describe pvc -Name: claim1 -Namespace: default -Status: Bound -Volume: pvc-bdb82652-694a-11e6-b811-080027242396 -Labels: -Capacity: 3Gi -Access Modes: RWO -No events. - -$ kubectl describe pv -Name: pvc-bdb82652-694a-11e6-b811-080027242396 -Labels: -Status: Bound -Claim: default/claim1 -Reclaim Policy: Delete -Access Modes: RWO -Capacity: 3Gi -Message: -Source: - Type: Quobyte (a Quobyte mount on the host that shares a pod's lifetime) - Registry: 138.68.79.14:7861 - Volume: kubernetes-dynamic-pvc-bdb97c58-694a-11e6-91b6-080027242396 - ReadOnly: false -No events. -``` - -Create a Pod to use the PVC: - -``` -$ kubectl create -f examples/persistent-volume-provisioning/quobyte/example-pod.yaml -``` - -#### Azure Disk - -```yaml -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: slow -provisioner: kubernetes.io/azure-disk -parameters: - skuName: Standard_LRS - location: eastus - storageAccount: azure_storage_account_name -``` - -* `skuName`: Azure storage account Sku tier. Default is empty. 
-* `location`: Azure storage account location. Default is empty. -* `storageAccount`: Azure storage account name. If storage account is not provided, all storage accounts associated with the resource group are searched to find one that matches `skuName` and `location`. If storage account is provided, it must reside in the same resource group as the cluster, and `skuName` and `location` are ignored. - -#### Azure File - -```yaml -kind: StorageClass -apiVersion: storage.k8s.io/v1beta1 -metadata: - name: slow -provisioner: kubernetes.io/azure-file -parameters: - skuName: Standard_LRS - location: eastus - storageAccount: azure_storage_account_name -``` - -The parameters are the same as those used by [Azure Disk](#azure-disk) - -### User provisioning requests - -Users request dynamically provisioned storage by including a storage class in their `PersistentVolumeClaim` using `spec.storageClassName` attribute. -It is required that this value matches the name of a `StorageClass` configured by the administrator. - -``` -{ - "kind": "PersistentVolumeClaim", - "apiVersion": "v1", - "metadata": { - "name": "claim1" - }, - "spec": { - "accessModes": [ - "ReadWriteOnce" - ], - "resources": { - "requests": { - "storage": "3Gi" - } - }, - "storageClassName": "slow" - } -} -``` - -### Sample output - -#### GCE - -This example uses GCE but any provisioner would follow the same flow. - -First we note there are no Persistent Volumes in the cluster. After creating a storage class and a claim including that storage class, we see a new PV is created -and automatically bound to the claim requesting storage. 
- - -``` -$ kubectl get pv - -$ kubectl create -f examples/persistent-volume-provisioning/gce-pd.yaml -storageclass "slow" created - -$ kubectl create -f examples/persistent-volume-provisioning/claim1.json -persistentvolumeclaim "claim1" created - -$ kubectl get pv -NAME CAPACITY ACCESSMODES STATUS CLAIM REASON AGE -pvc-bb6d2f0c-534c-11e6-9348-42010af00002 3Gi RWO Bound default/claim1 4s - -$ kubectl get pvc -NAME LABELS STATUS VOLUME CAPACITY ACCESSMODES AGE -claim1 Bound pvc-bb6d2f0c-534c-11e6-9348-42010af00002 3Gi RWO 7s - -# delete the claim to release the volume -$ kubectl delete pvc claim1 -persistentvolumeclaim "claim1" deleted - -# the volume is deleted in response to being release of its claim -$ kubectl get pv - -``` - - -#### Ceph RBD - -This section will guide you on how to configure and use the Ceph RBD provisioner. - -##### Pre-requisites - -For this to work you must have a functional Ceph cluster, and the `rbd` command line utility must be installed on any host/container that `kube-controller-manager` or `kubelet` is running on. - -##### Configuration - -First we must identify the Ceph client admin key. This is usually found in `/etc/ceph/ceph.client.admin.keyring` on your Ceph cluster nodes. The file will look something like this: - -``` -[client.admin] - key = AQBfxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx== - auid = 0 - caps mds = "allow" - caps mon = "allow *" - caps osd = "allow *" -``` - -From the key value, we will create a secret. We must create the Ceph admin Secret in the namespace defined in our `StorageClass`. In this example we've set the namespace to `kube-system`. - -``` -$ kubectl create secret generic ceph-secret-admin --from-literal=key='AQBfxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx==' --namespace=kube-system --type=kubernetes.io/rbd -``` - -Now modify `examples/persistent-volume-provisioning/rbd/rbd-storage-class.yaml` to reflect your environment, particularly the `monitors` field. 
We are now ready to create our RBD Storage Class: - -``` -$ kubectl create -f examples/persistent-volume-provisioning/rbd/rbd-storage-class.yaml -``` - -The kube-controller-manager is now able to provision storage, however we still need to be able to map the RBD volume to a node. Mapping should be done with a non-privileged key, if you have existing users you can get all keys by running `ceph auth list` on your Ceph cluster with the admin key. For this example we will create a new user and pool. - -``` -$ ceph osd pool create kube 512 -$ ceph auth get-or-create client.kube mon 'allow r' osd 'allow rwx pool=kube' -[client.kube] - key = AQBQyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy== -``` - -This key will be made into a secret, just like the admin secret. However this user secret will need to be created in every namespace where you intend to consume RBD volumes provisioned in our example storage class. Let's create a namespace called `myns`, and create the user secret in that namespace. - -``` -kubectl create namespace myns -kubectl create secret generic ceph-secret-user --from-literal=key='AQBQyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy==' --namespace=myns --type=kubernetes.io/rbd -``` - -You are now ready to provision and use RBD storage. - -##### Usage - -With the storageclass configured, let's create a PVC in our example namespace, `myns`: - -``` -$ kubectl create -f examples/persistent-volume-provisioning/claim1.json --namespace=myns -``` - -Eventually the PVC creation will result in a PV and RBD volume to match: - -``` -$ kubectl describe pvc --namespace=myns -Name: claim1 -Namespace: myns -Status: Bound -Volume: pvc-1cfa23b3-664b-11e6-9eb9-90b11c09520d -Labels: -Capacity: 3Gi -Access Modes: RWO -No events. 
- -$ kubectl describe pv -Name: pvc-1cfa23b3-664b-11e6-9eb9-90b11c09520d -Labels: -Status: Bound -Claim: myns/claim1 -Reclaim Policy: Delete -Access Modes: RWO -Capacity: 3Gi -Message: -Source: - Type: RBD (a Rados Block Device mount on the host that shares a pod's lifetime) - CephMonitors: [127.0.0.1:6789] - RBDImage: kubernetes-dynamic-pvc-1cfb1862-664b-11e6-9a5d-90b11c09520d - FSType: - RBDPool: kube - RadosUser: kube - Keyring: /etc/ceph/keyring - SecretRef: &{ceph-secret-user} - ReadOnly: false -No events. -``` - -With our storage provisioned, we can now create a Pod to use the PVC: - -``` -$ kubectl create -f examples/persistent-volume-provisioning/rbd/pod.yaml --namespace=myns -``` - -Now our pod has an RBD mount! - -``` -$ export PODNAME=`kubectl get pod --selector='role=server' --namespace=myns --output=template --template="{{with index .items 0}}{{.metadata.name}}{{end}}"` -$ kubectl exec -it $PODNAME --namespace=myns -- df -h | grep rbd -/dev/rbd1 2.9G 4.5M 2.8G 1% /var/lib/www/html -``` - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/persistent-volume-provisioning/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/persistent-volume-provisioning/README.md](https://github.com/kubernetes/examples/blob/master/staging/persistent-volume-provisioning/README.md) diff --git a/examples/persistent-volume-provisioning/quobyte/quobyte-storage-class.yaml b/examples/persistent-volume-provisioning/quobyte/quobyte-storage-class.yaml index 739b94fe916..b9679d61dfb 100644 --- a/examples/persistent-volume-provisioning/quobyte/quobyte-storage-class.yaml +++ b/examples/persistent-volume-provisioning/quobyte/quobyte-storage-class.yaml @@ -12,3 +12,4 @@ parameters: group: "root" quobyteConfig: "BASE" quobyteTenant: "DEFAULT" + createQuota: "False" diff --git a/examples/persistent-volume-provisioning/rbd/ceph-secret-admin.yaml 
b/examples/persistent-volume-provisioning/rbd/ceph-secret-admin.yaml index f86d975f7f7..894a9df6b4a 100644 --- a/examples/persistent-volume-provisioning/rbd/ceph-secret-admin.yaml +++ b/examples/persistent-volume-provisioning/rbd/ceph-secret-admin.yaml @@ -6,4 +6,3 @@ type: "kubernetes.io/rbd" data: #Please note this value is base64 encoded. key: QVFEQ1pMdFhPUnQrSmhBQUFYaERWNHJsZ3BsMmNjcDR6RFZST0E9PQ== -type: kubernetes.io/rbd diff --git a/examples/phabricator/README.md b/examples/phabricator/README.md index f374d03786d..b33ddb25579 100644 --- a/examples/phabricator/README.md +++ b/examples/phabricator/README.md @@ -1,216 +1 @@ -## Phabricator example - -This example shows how to build a simple multi-tier web application using Kubernetes and Docker. - -The example combines a web frontend and an external service that provides MySQL database. We use CloudSQL on Google Cloud Platform in this example, but in principle any approach to running MySQL should work. - -### Step Zero: Prerequisites - -This example assumes that you have a basic understanding of kubernetes [services](https://kubernetes.io/docs/user-guide/services.md) and that you have forked the repository and [turned up a Kubernetes cluster](https://kubernetes.io/docs/getting-started-guides/): - -```sh -$ cd kubernetes -$ cluster/kube-up.sh -``` - -### Step One: Set up Cloud SQL instance - -Follow the [official instructions](https://cloud.google.com/sql/docs/getting-started) to set up Cloud SQL instance. - -In the remaining part of this example we will assume that your instance is named "phabricator-db", has IP 1.2.3.4, is listening on port 3306 and the password is "1234". 
- -### Step Two: Authenticate phabricator in Cloud SQL - -In order to allow phabricator to connect to your Cloud SQL instance you need to run the following command to authorize all your nodes within a cluster: - -```bash -NODE_NAMES=`kubectl get nodes | cut -d" " -f1 | tail -n+2` -NODE_IPS=`gcloud compute instances list $NODE_NAMES | tr -s " " | cut -d" " -f 5 | tail -n+2` -gcloud sql instances patch phabricator-db --authorized-networks $NODE_IPS -``` - -Otherwise you will see the following logs: - -```bash -$ kubectl logs phabricator-controller-02qp4 -[...] -Raw MySQL Error: Attempt to connect to root@1.2.3.4 failed with error -#2013: Lost connection to MySQL server at 'reading initial communication packet', system error: 0. - -``` - -### Step Three: Turn up the phabricator - -To start Phabricator server use the file [`examples/phabricator/phabricator-controller.json`](phabricator-controller.json) which describes a [replication controller](https://kubernetes.io/docs/user-guide/replication-controller.md) with a single [pod](https://kubernetes.io/docs/user-guide/pods.md) running an Apache server with Phabricator PHP source: - - - -```json -{ - "kind": "ReplicationController", - "apiVersion": "v1", - "metadata": { - "name": "phabricator-controller", - "labels": { - "name": "phabricator" - } - }, - "spec": { - "replicas": 1, - "selector": { - "name": "phabricator" - }, - "template": { - "metadata": { - "labels": { - "name": "phabricator" - } - }, - "spec": { - "containers": [ - { - "name": "phabricator", - "image": "fgrzadkowski/example-php-phabricator", - "ports": [ - { - "name": "http-server", - "containerPort": 80 - } - ], - "env": [ - { - "name": "MYSQL_SERVICE_IP", - "value": "1.2.3.4" - }, - { - "name": "MYSQL_SERVICE_PORT", - "value": "3306" - }, - { - "name": "MYSQL_PASSWORD", - "value": "1234" - } - ] - } - ] - } - } - } -} -``` - -[Download example](phabricator-controller.json?raw=true) - - -Create the phabricator pod in your Kubernetes cluster by running: - 
-```sh -$ kubectl create -f examples/phabricator/phabricator-controller.json -``` - -**Note:** Remember to substitute environment variable values in json file before create replication controller. - -Once that's up you can list the pods in the cluster, to verify that it is running: - -```sh -kubectl get pods -``` - -You'll see a single phabricator pod. It will also display the machine that the pod is running on once it gets placed (may take up to thirty seconds): - -``` -NAME READY STATUS RESTARTS AGE -phabricator-controller-9vy68 1/1 Running 0 1m -``` - -If you ssh to that machine, you can run `docker ps` to see the actual pod: - -```sh -me@workstation$ gcloud compute ssh --zone us-central1-b kubernetes-node-2 - -$ sudo docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -54983bc33494 fgrzadkowski/phabricator:latest "/run.sh" 2 hours ago Up 2 hours k8s_phabricator.d6b45054_phabricator-controller-02qp4.default.api_eafb1e53-b6a9-11e4-b1ae-42010af05ea6_01c2c4ca -``` - -(Note that initial `docker pull` may take a few minutes, depending on network conditions. During this time, the `get pods` command will return `Pending` because the container has not yet started ) - -### Step Four: Turn up the phabricator service - -A Kubernetes 'service' is a named load balancer that proxies traffic to one or more containers. The services in a Kubernetes cluster are discoverable inside other containers via *environment variables*. Services find the containers to load balance based on pod labels. These environment variables are typically referenced in application code, shell scripts, or other places where one node needs to talk to another in a distributed system. You should catch up on [kubernetes services](https://kubernetes.io/docs/user-guide/services.md) before proceeding. - -The pod that you created in Step Three has the label `name=phabricator`. The selector field of the service determines which pods will receive the traffic sent to the service. 
- -Use the file [`examples/phabricator/phabricator-service.json`](phabricator-service.json): - - - -```json -{ - "kind": "Service", - "apiVersion": "v1", - "metadata": { - "name": "phabricator" - }, - "spec": { - "ports": [ - { - "port": 80, - "targetPort": "http-server" - } - ], - "selector": { - "name": "phabricator" - }, - "type": "LoadBalancer" - } -} -``` - -[Download example](phabricator-service.json?raw=true) - - -To create the service run: - -```sh -$ kubectl create -f examples/phabricator/phabricator-service.json -phabricator -``` - -To play with the service itself, find the external IP of the load balancer: - -```console -$ kubectl get services -NAME LABELS SELECTOR IP(S) PORT(S) -kubernetes component=apiserver,provider=kubernetes 10.0.0.1 443/TCP -phabricator name=phabricator 10.0.31.173 80/TCP -$ kubectl get services phabricator -o json | grep ingress -A 4 - "ingress": [ - { - "ip": "104.197.13.125" - } - ] -``` - -and then visit port 80 of that IP address. - -**Note**: Provisioning of the external IP address may take few minutes. - -**Note**: You may need to open the firewall for port 80 using the [console][cloud-console] or the `gcloud` tool. 
The following command will allow traffic from any source to instances tagged `kubernetes-node`: - -```sh -$ gcloud compute firewall-rules create phabricator-node-80 --allow=tcp:80 --target-tags kubernetes-node -``` - -### Step Six: Cleanup - -To turn down a Kubernetes cluster: - -```sh -$ cluster/kube-down.sh -``` - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/phabricator/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/phabricator/README.md](https://github.com/kubernetes/examples/blob/master/staging/phabricator/README.md) diff --git a/examples/phabricator/php-phabricator/Dockerfile b/examples/phabricator/php-phabricator/Dockerfile index f39b9421af9..779e799ce6c 100644 --- a/examples/phabricator/php-phabricator/Dockerfile +++ b/examples/phabricator/php-phabricator/Dockerfile @@ -15,21 +15,25 @@ FROM ubuntu:14.04 # Install all the required packages. -RUN apt-get update -RUN apt-get -y install \ +RUN apt-get update && \ + apt-get -y install \ git apache2 dpkg-dev python-pygments \ - php5 php5-mysql php5-gd php5-dev php5-curl php-apc php5-cli php5-json php5-xhprof -RUN a2enmod rewrite -RUN apt-get source php5 -RUN (cd `ls -1F | grep '^php5-.*/$'`/ext/pcntl && phpize && ./configure && make && sudo make install) + php5 php5-mysql php5-gd php5-dev php5-curl php-apc php5-cli php5-json php5-xhprof && \ + apt-get -y clean autoclean && \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +#Configure php +RUN a2enmod rewrite && \ + apt-get source php5 && \ + (cd `ls -1F | grep '^php5-.*/$'`/ext/pcntl && phpize && ./configure && make && sudo make install) # Load code source. 
RUN mkdir /home/www-data -RUN cd /home/www-data && git clone https://github.com/phacility/libphutil.git -RUN cd /home/www-data && git clone https://github.com/phacility/arcanist.git -RUN cd /home/www-data && git clone https://github.com/phacility/phabricator.git -RUN chown -R www-data /home/www-data -RUN chgrp -R www-data /home/www-data +RUN cd /home/www-data && git clone https://github.com/phacility/libphutil.git && \ + cd /home/www-data && git clone https://github.com/phacility/arcanist.git && \ + cd /home/www-data && git clone https://github.com/phacility/phabricator.git && \ + chown -R www-data /home/www-data && \ + chgrp -R www-data /home/www-data ADD 000-default.conf /etc/apache2/sites-available/000-default.conf ADD run.sh /run.sh diff --git a/examples/podsecuritypolicy/rbac/README.md b/examples/podsecuritypolicy/rbac/README.md index e606d23dec7..66af1b7c1e8 100644 --- a/examples/podsecuritypolicy/rbac/README.md +++ b/examples/podsecuritypolicy/rbac/README.md @@ -1,196 +1 @@ -## PSP RBAC Example - -This example demonstrates the usage of *PodSecurityPolicy* to control access to privileged containers -based on role and groups. - -### Prerequisites - -The server must be started to enable the appropriate APIs and flags - -1. allow privileged containers -1. allow security contexts -1. enable RBAC and accept any token -1. enable PodSecurityPolicies -1. use the PodSecurityPolicy admission controller - -If you are using the `local-up-cluster.sh` script you may enable these settings with the following syntax - -``` -PSP_ADMISSION=true ALLOW_PRIVILEGED=true ALLOW_SECURITY_CONTEXT=true ALLOW_ANY_TOKEN=true ENABLE_RBAC=true RUNTIME_CONFIG="extensions/v1beta1=true,extensions/v1beta1/podsecuritypolicy=true" hack/local-up-cluster.sh -``` - -### Using the protected port - -It is important to note that this example uses the following syntax to test with RBAC - -1. 
`--server=https://127.0.0.1:6443`: when performing requests this ensures that the protected port is used so -that RBAC will be enforced -1. `--token={user}/{group(s)}`: this syntax allows a request to specify the username and groups to use for -testing. It relies on the `ALLOW_ANY_TOKEN` setting. - -## Creating the policies, roles, and bindings - -### Policies - -The first step to enforcing cluster constraints via PSP is to create your policies. In this -example we will use two policies, `restricted` and `privileged`. For simplicity, the only difference -between these policies is the ability to run a privileged container. - -```yaml -apiVersion: extensions/v1beta1 -kind: PodSecurityPolicy -metadata: - name: privileged -spec: - fsGroup: - rule: RunAsAny - privileged: true - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - '*' ---- -apiVersion: extensions/v1beta1 -kind: PodSecurityPolicy -metadata: - name: restricted -spec: - fsGroup: - rule: RunAsAny - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - '*' - -``` - -To create these policies run - -``` -$ kubectl --server=https://127.0.0.1:6443 --token=foo/system:masters create -f examples/podsecuritypolicy/rbac/policies.yaml -podsecuritypolicy "privileged" created -podsecuritypolicy "restricted" created -``` - -### Roles and bindings - -In order to create a pod, either the creating user or the service account -specified by the pod must be authorized to use a `PodSecurityPolicy` object -that allows the pod. That authorization is determined by the ability to perform -the `use` verb on a particular `podsecuritypolicies` resource. The `use` verb -is a special verb that grants access to use a policy while not permitting any -other access. For this example, we'll first create RBAC `ClusterRoles` that -enable access to `use` specific policies. - -1. 
`restricted-psp-user`: this role allows the `use` verb on the `restricted` policy only -2. `privileged-psp-user`: this role allows the `use` verb on the `privileged` policy only - - -We can then create `ClusterRoleBindings` to grant groups of users the -"restricted" and/or "privileged" `ClusterRoles`. In this example, the bindings -grant the following roles to groups. - -1. `privileged`: this group is bound to the `privilegedPSP` role and `restrictedPSP` role which gives users -in this group access to both policies. -1. `restricted`: this group is bound to the `restrictedPSP` role. -1. `system:authenticated`: this is a system group for any authenticated user. It is bound to the `edit` -role which is already provided by the cluster. - -To create these roles and bindings run - -``` -$ kubectl --server=https://127.0.0.1:6443 --token=foo/system:masters create -f examples/podsecuritypolicy/rbac/roles.yaml -clusterrole "restricted-psp-user" created -clusterrole "privileged-psp-user" created - -$ kubectl --server=https://127.0.0.1:6443 --token=foo/system:masters create -f examples/podsecuritypolicy/rbac/bindings.yaml -clusterrolebinding "privileged-psp-users" created -clusterrolebinding "restricted-psp-users" created -clusterrolebinding "edit" created -``` - -## Testing access - -### Restricted user can create non-privileged pods - -Create the pod - -``` -$ kubectl --server=https://127.0.0.1:6443 --token=foo/restricted-psp-users create -f examples/podsecuritypolicy/rbac/pod.yaml -pod "nginx" created -``` - -Check the PSP that allowed the pod - -``` -$ kubectl get pod nginx -o yaml | grep psp - kubernetes.io/psp: restricted -``` - -### Restricted user cannot create privileged pods - -Delete the existing pod - -``` -$ kubectl delete pod nginx -pod "nginx" deleted -``` - -Create the privileged pod - -``` -$ kubectl --server=https://127.0.0.1:6443 --token=foo/restricted-psp-users create -f examples/podsecuritypolicy/rbac/pod_priv.yaml -Error from server (Forbidden): error 
when creating "examples/podsecuritypolicy/rbac/pod_priv.yaml": pods "nginx" is forbidden: unable to validate against any pod security policy: [spec.containers[0].securityContext.privileged: Invalid value: true: Privileged containers are not allowed] -``` - -### Privileged user can create non-privileged pods - -``` -$ kubectl --server=https://127.0.0.1:6443 --token=foo/privileged-psp-users create -f examples/podsecuritypolicy/rbac/pod.yaml -pod "nginx" created -``` - -Check the PSP that allowed the pod. Note, this could be the `restricted` or `privileged` PSP since both allow -for the creation of non-privileged pods. - -``` -$ kubectl get pod nginx -o yaml | egrep "psp|privileged" - kubernetes.io/psp: privileged - privileged: false -``` - -### Privileged user can create privileged pods - -Delete the existing pod - -``` -$ kubectl delete pod nginx -pod "nginx" deleted -``` - -Create the privileged pod - -``` -$ kubectl --server=https://127.0.0.1:6443 --token=foo/privileged-psp-users create -f examples/podsecuritypolicy/rbac/pod_priv.yaml -pod "nginx" created -``` - -Check the PSP that allowed the pod. - -``` -$ kubectl get pod nginx -o yaml | egrep "psp|privileged" - kubernetes.io/psp: privileged - privileged: true -``` - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/podsecuritypolicy/rbac/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/podsecuritypolicy/rbac/README.md](https://github.com/kubernetes/examples/blob/master/staging/podsecuritypolicy/rbac/README.md) diff --git a/examples/podsecuritypolicy/rbac/bindings.yaml b/examples/podsecuritypolicy/rbac/bindings.yaml index b07f99ee21b..13b8ac3c4ac 100644 --- a/examples/podsecuritypolicy/rbac/bindings.yaml +++ b/examples/podsecuritypolicy/rbac/bindings.yaml @@ -31,7 +31,8 @@ roleRef: kind: ClusterRole name: restricted-psp-user --- -# edit grants edit role to system:authenticated. 
+# edit grants edit role to the groups +# restricted and privileged. apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: diff --git a/examples/runtime-constraints/README.md b/examples/runtime-constraints/README.md index 82f6c0ee1de..3f245a12d43 100644 --- a/examples/runtime-constraints/README.md +++ b/examples/runtime-constraints/README.md @@ -1,285 +1 @@ -## Runtime Constraints example - -This example demonstrates how Kubernetes enforces runtime constraints for compute resources. - -### Prerequisites - -For the purpose of this example, we will spin up a 1 node cluster using the Vagrant provider that -is not running with any additional add-ons that consume node resources. This keeps our demonstration -of compute resources easier to follow by starting with an empty cluster. - -``` -$ export KUBERNETES_PROVIDER=vagrant -$ export NUM_NODES=1 -$ export KUBE_ENABLE_CLUSTER_MONITORING=none -$ export KUBE_ENABLE_CLUSTER_DNS=false -$ export KUBE_ENABLE_CLUSTER_UI=false -$ cluster/kube-up.sh -``` - -We should now have a single node cluster running 0 pods. - -``` -$ cluster/kubectl.sh get nodes -NAME LABELS STATUS AGE -10.245.1.3 kubernetes.io/hostname=10.245.1.3 Ready 17m -$ cluster/kubectl.sh get pods --all-namespaces -``` - -When demonstrating runtime constraints, it's useful to show what happens when a node is under heavy load. For -this scenario, we have a single node with 2 cpus and 1GB of memory to demonstrate behavior under load, but the -results extend to multi-node scenarios. - -### CPU requests - -Each container in a pod may specify the amount of CPU it requests on a node. CPU requests are used at schedule time, and represent a minimum amount of CPU that should be reserved for your container to run. - -When executing your container, the Kubelet maps your containers CPU requests to CFS shares in the Linux kernel. CFS CPU shares do not impose a ceiling on the actual amount of CPU the container can use. 
Instead, it defines a relative weight across all containers on the system for how much CPU time the container should get if there is CPU contention. - -Let's demonstrate this concept using a simple container that will consume as much CPU as possible. - -``` -$ cluster/kubectl.sh run cpuhog \ - --image=busybox \ - --requests=cpu=100m \ - -- md5sum /dev/urandom -``` - -This will create a single pod on your node that requests 1/10 of a CPU, but it has no limit on how much CPU it may actually consume -on the node. - -To demonstrate this, if you SSH into your machine, you will see it is consuming as much CPU as possible on the node. - -``` -$ vagrant ssh node-1 -$ sudo docker stats $(sudo docker ps -q) -CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O -6b593b1a9658 0.00% 1.425 MB/1.042 GB 0.14% 1.038 kB/738 B -ae8ae4ffcfe4 150.06% 831.5 kB/1.042 GB 0.08% 0 B/0 B -``` - -As you can see, its consuming 150% of the total CPU. - -If we scale our replication controller to 20 pods, we should see that each container is given an equal proportion of CPU time. - -``` -$ cluster/kubectl.sh scale rc/cpuhog --replicas=20 -``` - -Once all the pods are running, you will see on your node that each container is getting approximately an equal proportion of CPU time. - -``` -$ sudo docker stats $(sudo docker ps -q) -CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O -089e2d061dee 9.24% 786.4 kB/1.042 GB 0.08% 0 B/0 B -0be33d6e8ddb 10.48% 823.3 kB/1.042 GB 0.08% 0 B/0 B -0f4e3c4a93e0 10.43% 786.4 kB/1.042 GB 0.08% 0 B/0 B -``` - -Each container is getting 10% of the CPU time per their scheduling request, and we are unable to schedule more. - -As you can see CPU requests are used to schedule pods to the node in a manner that provides weighted distribution of CPU time -when under contention. If the node is not being actively consumed by other containers, a container is able to burst up to as much -available CPU time as possible. 
If there is contention for CPU, CPU time is shared based on the requested value. - -Let's delete all existing resources in preparation for the next scenario. Verify all the pods are deleted and terminated. - -``` -$ cluster/kubectl.sh delete rc --all -$ cluster/kubectl.sh get pods -NAME READY STATUS RESTARTS AGE -``` - -### CPU limits - -So what do you do if you want to control the maximum amount of CPU that your container can burst to use in order provide a consistent -level of service independent of CPU contention on the node? You can specify an upper limit on the total amount of CPU that a pod's -container may consume. - -To enforce this feature, your node must run a docker version >= 1.7, and your operating system kernel must -have support for CFS quota enabled. Finally, your the Kubelet must be started with the following flag: - -``` -kubelet --cpu-cfs-quota=true -``` - -To demonstrate, let's create the same pod again, but this time set an upper limit to use 50% of a single CPU. - -``` -$ cluster/kubectl.sh run cpuhog \ - --image=busybox \ - --requests=cpu=100m \ - --limits=cpu=500m \ - -- md5sum /dev/urandom -``` - -Let's SSH into the node, and look at usage stats. - -``` -$ vagrant ssh node-1 -$ sudo su -$ docker stats $(docker ps -q) -CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O -2a196edf7de2 47.38% 835.6 kB/1.042 GB 0.08% 0 B/0 B -... -``` - -As you can see, the container is no longer allowed to consume all available CPU on the node. Instead, it is being limited to use -50% of a CPU over every 100ms period. As a result, the reported value will be in the range of 50% but may oscillate above and below. - -Let's delete all existing resources in preparation for the next scenario. Verify all the pods are deleted and terminated. - -``` -$ cluster/kubectl.sh delete rc --all -$ cluster/kubectl.sh get pods -NAME READY STATUS RESTARTS AGE -``` - -### Memory requests - -By default, a container is able to consume as much memory on the node as possible. 
In order to improve placement of your -pods in the cluster, it is recommended to specify the amount of memory your container will require to run. The scheduler -will then take available node memory capacity into account prior to binding your pod to a node. - -Let's demonstrate this by creating a pod that runs a single container which requests 100Mi of memory. The container will -allocate and write to 200MB of memory every 2 seconds. - -``` -$ cluster/kubectl.sh run memhog \ - --image=derekwaynecarr/memhog \ - --requests=memory=100Mi \ - --command \ - -- /bin/sh -c "while true; do memhog -r100 200m; sleep 1; done" -``` - -If you look at output of docker stats on the node: - -``` -$ docker stats $(docker ps -q) -CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O -2badf74ae782 0.00% 1.425 MB/1.042 GB 0.14% 816 B/348 B -a320182967fa 105.81% 214.2 MB/1.042 GB 20.56% 0 B/0 B - -``` - -As you can see, the container is using approximately 200MB of memory, and is only limited to the 1GB of memory on the node. - -We scheduled against 100Mi, but have burst our memory usage to a greater value. - -We refer to this as memory having __Burstable__ quality of service for this container. - -Let's delete all existing resources in preparation for the next scenario. Verify all the pods are deleted and terminated. - -``` -$ cluster/kubectl.sh delete rc --all -$ cluster/kubectl.sh get pods -NAME READY STATUS RESTARTS AGE -``` - -### Memory limits - -If you specify a memory limit, you can constrain the amount of memory your container can use. - -For example, let's limit our container to 200Mi of memory, and just consume 100MB. 
- -``` -$ cluster/kubectl.sh run memhog \ - --image=derekwaynecarr/memhog \ - --limits=memory=200Mi \ - --command -- /bin/sh -c "while true; do memhog -r100 100m; sleep 1; done" -``` - -If you look at output of docker stats on the node: - -``` -$ docker stats $(docker ps -q) -CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O -5a7c22ae1837 125.23% 109.4 MB/209.7 MB 52.14% 0 B/0 B -c1d7579c9291 0.00% 1.421 MB/1.042 GB 0.14% 1.038 kB/816 B -``` - -As you can see, we are limited to 200Mi memory, and are only consuming 109.4MB on the node. - -Let's demonstrate what happens if you exceed your allowed memory usage by creating a replication controller -whose pod will keep being OOM killed because it attempts to allocate 300MB of memory, but is limited to 200Mi. - -``` -$ cluster/kubectl.sh run memhog-oom --image=derekwaynecarr/memhog --limits=memory=200Mi --command -- memhog -r100 300m -``` - -If we describe the created pod, you will see that it keeps restarting until it ultimately goes into a CrashLoopBackOff. - -The reason it is killed and restarts is because it is OOMKilled as it attempts to exceed its memory limit. - -``` -$ cluster/kubectl.sh get pods -NAME READY STATUS RESTARTS AGE -memhog-oom-gj9hw 0/1 CrashLoopBackOff 2 26s -$ cluster/kubectl.sh describe pods/memhog-oom-gj9hw | grep -C 3 "Terminated" - memory: 200Mi - State: Waiting - Reason: CrashLoopBackOff - Last Termination State: Terminated - Reason: OOMKilled - Exit Code: 137 - Started: Wed, 23 Sep 2015 15:23:58 -0400 -``` - -Let's clean-up before proceeding further. - -``` -$ cluster/kubectl.sh delete rc --all -``` - -### What if my node runs out of memory? - -If you only schedule __Guaranteed__ memory containers, where the request is equal to the limit, then you are not in major danger of -causing an OOM event on your node. If any individual container consumes more than their specified limit, it will be killed. 
- -If you schedule __BestEffort__ memory containers, where the request and limit is not specified, or __Burstable__ memory containers, where -the request is less than any specified limit, then it is possible that a container will request more memory than what is actually available on the node. - -If this occurs, the system will attempt to prioritize the containers that are killed based on their quality of service. This is done -by using the OOMScoreAdjust feature in the Linux kernel which provides a heuristic to rank a process between -1000 and 1000. Processes -with lower values are preserved in favor of processes with higher values. The system daemons (kubelet, kube-proxy, docker) all run with -low OOMScoreAdjust values. - -In simplest terms, containers with __Guaranteed__ memory containers are given a lower value than __Burstable__ containers which has -a lower value than __BestEffort__ containers. As a consequence, containers with __BestEffort__ should be killed before the other tiers. - -To demonstrate this, let's spin up a set of different replication controllers that will over commit the node. - -``` -$ cluster/kubectl.sh run mem-guaranteed --image=derekwaynecarr/memhog --replicas=2 --requests=cpu=10m --limits=memory=600Mi --command -- memhog -r100000 500m -$ cluster/kubectl.sh run mem-burstable --image=derekwaynecarr/memhog --replicas=2 --requests=cpu=10m,memory=600Mi --command -- memhog -r100000 100m -$ cluster/kubectl.sh run mem-besteffort --replicas=10 --image=derekwaynecarr/memhog --requests=cpu=10m --command -- memhog -r10000 500m -``` - -This will induce a SystemOOM - -``` -$ cluster/kubectl.sh get events | grep OOM -43m 8m 178 10.245.1.3 Node SystemOOM {kubelet 10.245.1.3} System OOM encountered -``` - -If you look at the pods: - -``` -$ cluster/kubectl.sh get pods -NAME READY STATUS RESTARTS AGE -... 
-mem-besteffort-zpnpm 0/1 CrashLoopBackOff 4 3m -mem-burstable-n0yz1 1/1 Running 0 4m -mem-burstable-q3dts 1/1 Running 0 4m -mem-guaranteed-fqsw8 1/1 Running 0 4m -mem-guaranteed-rkqso 1/1 Running 0 4m -``` - -You see that our BestEffort pod goes in a restart cycle, but the pods with greater levels of quality of service continue to function. - -As you can see, we rely on the Kernel to react to system OOM events. Depending on how your host operating -system was configured, and which process the Kernel ultimately decides to kill on your Node, you may experience unstable results. In addition, during an OOM event, while the kernel is cleaning up processes, the system may experience significant periods of slow down or appear unresponsive. As a result, while the system allows you to overcommit on memory, we recommend to not induce a Kernel sys OOM. - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/runtime-constraints/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/runtime-constraints/README.md](https://github.com/kubernetes/examples/blob/master/staging/runtime-constraints/README.md) diff --git a/examples/selenium/README.md b/examples/selenium/README.md index ee101361302..e365937118b 100644 --- a/examples/selenium/README.md +++ b/examples/selenium/README.md @@ -1,199 +1 @@ -## Selenium on Kubernetes - -Selenium is a browser automation tool used primarily for testing web applications. However when Selenium is used in a CI pipeline to test applications, there is often contention around the use of Selenium resources. This example shows you how to deploy Selenium to Kubernetes in a scalable fashion. - -### Prerequisites - -This example assumes you have a working Kubernetes cluster and a properly configured kubectl client. See the [Getting Started Guides](https://kubernetes.io/docs/getting-started-guides/) for details. 
- -Google Container Engine is also a quick way to get Kubernetes up and running: https://cloud.google.com/container-engine/ - -Your cluster must have 4 CPU and 6 GB of RAM to complete the example up to the scaling portion. - -### Deploy Selenium Grid Hub: - -We will be using Selenium Grid Hub to make our Selenium install scalable via a master/worker model. The Selenium Hub is the master, and the Selenium Nodes are the workers(not to be confused with Kubernetes nodes). We only need one hub, but we're using a replication controller to ensure that the hub is always running: - -```console -kubectl create --filename=examples/selenium/selenium-hub-rc.yaml -``` - -The Selenium Nodes will need to know how to get to the Hub, let's create a service for the nodes to connect to. - -```console -kubectl create --filename=examples/selenium/selenium-hub-svc.yaml -``` - -### Verify Selenium Hub Deployment - -Let's verify our deployment of Selenium hub by connecting to the web console. - -#### Kubernetes Nodes Reachable - -If your Kubernetes nodes are reachable from your network, you can verify the hub by hitting it on the nodeport. You can retrieve the nodeport by typing `kubectl describe svc selenium-hub`, however the snippet below automates that by using kubectl's template functionality: - -```console -export NODEPORT=`kubectl get svc --selector='app=selenium-hub' --output=template --template="{{ with index .items 0}}{{with index .spec.ports 0 }}{{.nodePort}}{{end}}{{end}}"` -export NODE=`kubectl get nodes --output=template --template="{{with index .items 0 }}{{.metadata.name}}{{end}}"` - -curl http://$NODE:$NODEPORT -``` - -#### Kubernetes Nodes Unreachable - -If you cannot reach your Kubernetes nodes from your network, you can proxy via kubectl. 
- -```console -export PODNAME=`kubectl get pods --selector="app=selenium-hub" --output=template --template="{{with index .items 0}}{{.metadata.name}}{{end}}"` -kubectl port-forward $PODNAME 4444:4444 -``` - -In a separate terminal, you can now check the status. - -```console -curl http://localhost:4444 -``` - -#### Using Google Container Engine - -If you are using Google Container Engine, you can expose your hub via the internet. This is a bad idea for many reasons, but you can do it as follows: - -```console -kubectl expose rc selenium-hub --name=selenium-hub-external --labels="app=selenium-hub,external=true" --type=LoadBalancer -``` - -Then wait a few minutes, eventually your new `selenium-hub-external` service will be assigned a load balanced IP from gcloud. Once `kubectl get svc selenium-hub-external` shows two IPs, run this snippet. - -```console -export INTERNET_IP=`kubectl get svc --selector="app=selenium-hub,external=true" --output=template --template="{{with index .items 0}}{{with index .status.loadBalancer.ingress 0}}{{.ip}}{{end}}{{end}}"` - -curl http://$INTERNET_IP:4444/ -``` - -You should now be able to hit `$INTERNET_IP` via your web browser, and so can everyone else on the Internet! - -### Deploy Firefox and Chrome Nodes: - -Now that the Hub is up, we can deploy workers. - -This will deploy 2 Chrome nodes. - -```console -kubectl create --filename=examples/selenium/selenium-node-chrome-rc.yaml -``` - -And 2 Firefox nodes to match. - -```console -kubectl create --filename=examples/selenium/selenium-node-firefox-rc.yaml -``` - -Once the pods start, you will see them show up in the Selenium Hub interface. - -### Run a Selenium Job - -Let's run a quick Selenium job to validate our setup. - -#### Setup Python Environment - -First, we need to start a python container that we can attach to. - -```console -kubectl run selenium-python --image=google/python-hello -``` - -Next, we need to get inside this container. 
- -```console -export PODNAME=`kubectl get pods --selector="run=selenium-python" --output=template --template="{{with index .items 0}}{{.metadata.name}}{{end}}"` -kubectl exec --stdin=true --tty=true $PODNAME bash -``` - -Once inside, we need to install the Selenium library - -```console -pip install selenium -``` - -#### Run Selenium Job with Python - -We're all set up, start the python interpreter. - -```console -python -``` - -And paste in the contents of selenium-test.py. - -```python -from selenium import webdriver -from selenium.webdriver.common.desired_capabilities import DesiredCapabilities - -def check_browser(browser): - driver = webdriver.Remote( - command_executor='http://selenium-hub:4444/wd/hub', - desired_capabilities=getattr(DesiredCapabilities, browser) - ) - driver.get("http://google.com") - assert "google" in driver.page_source - driver.close() - print("Browser %s checks out!" % browser) - - -check_browser("FIREFOX") -check_browser("CHROME") -``` - -You should get - -``` ->>> check_browser("FIREFOX") -Browser FIREFOX checks out! ->>> check_browser("CHROME") -Browser CHROME checks out! -``` - -Congratulations, your Selenium Hub is up, with Firefox and Chrome nodes! - -### Scale your Firefox and Chrome nodes. - -If you need more Firefox or Chrome nodes, your hardware is the limit: - -```console -kubectl scale rc selenium-node-firefox --replicas=10 -kubectl scale rc selenium-node-chrome --replicas=10 -``` - -You now have 10 Firefox and 10 Chrome nodes, happy Seleniuming! - -### Debugging - -Sometimes it is necessary to check on a hung test. Each pod is running VNC. To check on one of the browser nodes via VNC, it's recommended that you proxy, since we don't want to expose a service for every pod, and the containers have a weak VNC password. Replace POD_NAME with the name of the pod you want to connect to. 
- -```console -kubectl port-forward $POD_NAME 5900:5900 -``` - -Then connect to localhost:5900 with your VNC client using the password "secret" - -Enjoy your scalable Selenium Grid! - -Adapted from: https://github.com/SeleniumHQ/docker-selenium - -### Teardown - -To remove all created resources, run the following: - -```console -kubectl delete rc selenium-hub -kubectl delete rc selenium-node-chrome -kubectl delete rc selenium-node-firefox -kubectl delete deployment selenium-python -kubectl delete svc selenium-hub -kubectl delete svc selenium-hub-external -``` - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/selenium/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/selenium/README.md](https://github.com/kubernetes/examples/blob/master/staging/selenium/README.md) diff --git a/examples/sharing-clusters/README.md b/examples/sharing-clusters/README.md index 38f46fa0e7c..ffd44e29a61 100644 --- a/examples/sharing-clusters/README.md +++ b/examples/sharing-clusters/README.md @@ -1,187 +1 @@ -# Sharing Clusters - -This example demonstrates how to access one kubernetes cluster from another. It only works if both clusters are running on the same network, on a cloud provider that provides a private ip range per network (eg: GCE, GKE, AWS). - -## Setup - -Create a cluster in US (you don't need to do this if you already have a running kubernetes cluster) - -```shell -$ cluster/kube-up.sh -``` - -Before creating our second cluster, lets have a look at the kubectl config: - -```yaml -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: REDACTED - server: https://104.197.84.16 - name: -... -current-context: -... 
-``` - -Now spin up the second cluster in Europe - -```shell -$ ./cluster/kube-up.sh -$ KUBE_GCE_ZONE=europe-west1-b KUBE_GCE_INSTANCE_PREFIX=eu ./cluster/kube-up.sh -``` - -Your kubectl config should contain both clusters: - -```yaml -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: REDACTED - server: https://146.148.25.221 - name: -- cluster: - certificate-authority-data: REDACTED - server: https://104.197.84.16 - name: -... -current-context: kubernetesdev_eu -... -``` - -And kubectl get nodes should agree: - -``` -$ kubectl get nodes -NAME LABELS STATUS -eu-node-0n61 kubernetes.io/hostname=eu-node-0n61 Ready -eu-node-79ua kubernetes.io/hostname=eu-node-79ua Ready -eu-node-7wz7 kubernetes.io/hostname=eu-node-7wz7 Ready -eu-node-loh2 kubernetes.io/hostname=eu-node-loh2 Ready - -$ kubectl config use-context -$ kubectl get nodes -NAME LABELS STATUS -kubernetes-node-5jtd kubernetes.io/hostname=kubernetes-node-5jtd Ready -kubernetes-node-lqfc kubernetes.io/hostname=kubernetes-node-lqfc Ready -kubernetes-node-sjra kubernetes.io/hostname=kubernetes-node-sjra Ready -kubernetes-node-wul8 kubernetes.io/hostname=kubernetes-node-wul8 Ready -``` - -## Testing reachability - -For this test to work we'll need to create a service in europe: - -``` -$ kubectl config use-context -$ kubectl create -f /tmp/secret.json -$ kubectl create -f examples/https-nginx/nginx-app.yaml -$ kubectl exec -it my-nginx-luiln -- echo "Europe nginx" >> /usr/share/nginx/html/index.html -$ kubectl get ep -NAME ENDPOINTS -kubernetes 10.240.249.92:443 -nginxsvc 10.244.0.4:80,10.244.0.4:443 -``` - -Just to test reachability, we'll try hitting the Europe nginx from our initial US central cluster. 
Create a basic curl pod in the US cluster: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: curlpod -spec: - containers: - - image: radial/busyboxplus:curl - command: - - sleep - - "360000000" - imagePullPolicy: IfNotPresent - name: curlcontainer - restartPolicy: Always -``` - -And test that you can actually reach the test nginx service across continents - -``` -$ kubectl config use-context -$ kubectl -it exec curlpod -- /bin/sh -[ root@curlpod:/ ]$ curl http://10.244.0.4:80 -Europe nginx -``` - -## Granting access to the remote cluster - -We will grant the US cluster access to the Europe cluster. Basically we're going to setup a secret that allows kubectl to function in a pod running in the US cluster, just like it did on our local machine in the previous step. First create a secret with the contents of the current .kube/config: - -```shell -$ kubectl config use-context -$ go run ./make_secret.go --kubeconfig=$HOME/.kube/config > /tmp/secret.json -$ kubectl config use-context -$ kubectl create -f /tmp/secret.json -``` - -Create a kubectl pod that uses the secret, in the US cluster. 
- -```json -{ - "kind": "Pod", - "apiVersion": "v1", - "metadata": { - "name": "kubectl-tester" - }, - "spec": { - "volumes": [ - { - "name": "secret-volume", - "secret": { - "secretName": "kubeconfig" - } - } - ], - "containers": [ - { - "name": "kubectl", - "image": "bprashanth/kubectl:0.0", - "imagePullPolicy": "Always", - "env": [ - { - "name": "KUBECONFIG", - "value": "/.kube/config" - } - ], - "args": [ - "proxy", "-p", "8001" - ], - "volumeMounts": [ - { - "name": "secret-volume", - "mountPath": "/.kube" - } - ] - } - ] - } -} -``` - -And check that you can access the remote cluster - -```shell -$ kubectl config use-context -$ kubectl exec -it kubectl-tester bash - -kubectl-tester $ kubectl get nodes -NAME LABELS STATUS -eu-node-0n61 kubernetes.io/hostname=eu-node-0n61 Ready -eu-node-79ua kubernetes.io/hostname=eu-node-79ua Ready -eu-node-7wz7 kubernetes.io/hostname=eu-node-7wz7 Ready -eu-node-loh2 kubernetes.io/hostname=eu-node-loh2 Ready -``` - -For a more advanced example of sharing clusters, see the [service-loadbalancer](https://github.com/kubernetes/contrib/tree/master/service-loadbalancer/README.md) - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/sharing-clusters/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/sharing-clusters/README.md](https://github.com/kubernetes/examples/blob/master/staging/sharing-clusters/README.md) diff --git a/examples/spark/README.md b/examples/spark/README.md index 5692efbb318..edfe3efd23c 100644 --- a/examples/spark/README.md +++ b/examples/spark/README.md @@ -1,373 +1 @@ -# Spark example - -Following this example, you will create a functional [Apache -Spark](http://spark.apache.org/) cluster using Kubernetes and -[Docker](http://docker.io). - -You will setup a Spark master service and a set of Spark workers using Spark's [standalone mode](http://spark.apache.org/docs/latest/spark-standalone.html). 
- -For the impatient expert, jump straight to the [tl;dr](#tldr) -section. - -### Sources - -The Docker images are heavily based on https://github.com/mattf/docker-spark. -And are curated in https://github.com/kubernetes/application-images/tree/master/spark - -The Spark UI Proxy is taken from https://github.com/aseigneurin/spark-ui-proxy. - -The PySpark examples are taken from http://stackoverflow.com/questions/4114167/checking-if-a-number-is-a-prime-number-in-python/27946768#27946768 - -## Step Zero: Prerequisites - -This example assumes - -- You have a Kubernetes cluster installed and running. -- That you have installed the ```kubectl``` command line tool installed in your path and configured to talk to your Kubernetes cluster -- That your Kubernetes cluster is running [kube-dns](https://github.com/kubernetes/dns) or an equivalent integration. - -Optionally, your Kubernetes cluster should be configured with a Loadbalancer integration (automatically configured via kube-up or GKE) - -## Step One: Create namespace - -```sh -$ kubectl create -f examples/spark/namespace-spark-cluster.yaml -``` - -Now list all namespaces: - -```sh -$ kubectl get namespaces -NAME LABELS STATUS -default Active -spark-cluster name=spark-cluster Active -``` - -To configure kubectl to work with our namespace, we will create a new context using our current context as a base: - -```sh -$ CURRENT_CONTEXT=$(kubectl config view -o jsonpath='{.current-context}') -$ USER_NAME=$(kubectl config view -o jsonpath='{.contexts[?(@.name == "'"${CURRENT_CONTEXT}"'")].context.user}') -$ CLUSTER_NAME=$(kubectl config view -o jsonpath='{.contexts[?(@.name == "'"${CURRENT_CONTEXT}"'")].context.cluster}') -$ kubectl config set-context spark --namespace=spark-cluster --cluster=${CLUSTER_NAME} --user=${USER_NAME} -$ kubectl config use-context spark -``` - -## Step Two: Start your Master service - -The Master [service](https://kubernetes.io/docs/user-guide/services.md) is the master service -for a Spark cluster. 
- -Use the -[`examples/spark/spark-master-controller.yaml`](spark-master-controller.yaml) -file to create a -[replication controller](https://kubernetes.io/docs/user-guide/replication-controller.md) -running the Spark Master service. - -```console -$ kubectl create -f examples/spark/spark-master-controller.yaml -replicationcontroller "spark-master-controller" created -``` - -Then, use the -[`examples/spark/spark-master-service.yaml`](spark-master-service.yaml) file to -create a logical service endpoint that Spark workers can use to access the -Master pod: - -```console -$ kubectl create -f examples/spark/spark-master-service.yaml -service "spark-master" created -``` - -### Check to see if Master is running and accessible - -```console -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -spark-master-controller-5u0q5 1/1 Running 0 8m -``` - -Check logs to see the status of the master. (Use the pod retrieved from the previous output.) - -```sh -$ kubectl logs spark-master-controller-5u0q5 -starting org.apache.spark.deploy.master.Master, logging to /opt/spark-1.5.1-bin-hadoop2.6/sbin/../logs/spark--org.apache.spark.deploy.master.Master-1-spark-master-controller-g0oao.out -Spark Command: /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java -cp /opt/spark-1.5.1-bin-hadoop2.6/sbin/../conf/:/opt/spark-1.5.1-bin-hadoop2.6/lib/spark-assembly-1.5.1-hadoop2.6.0.jar:/opt/spark-1.5.1-bin-hadoop2.6/lib/datanucleus-rdbms-3.2.9.jar:/opt/spark-1.5.1-bin-hadoop2.6/lib/datanucleus-core-3.2.10.jar:/opt/spark-1.5.1-bin-hadoop2.6/lib/datanucleus-api-jdo-3.2.6.jar -Xms1g -Xmx1g org.apache.spark.deploy.master.Master --ip spark-master --port 7077 --webui-port 8080 -======================================== -15/10/27 21:25:05 INFO Master: Registered signal handlers for [TERM, HUP, INT] -15/10/27 21:25:05 INFO SecurityManager: Changing view acls to: root -15/10/27 21:25:05 INFO SecurityManager: Changing modify acls to: root -15/10/27 21:25:05 INFO SecurityManager: SecurityManager: authentication 
This proxy is necessary to access worker logs from the Spark UI.
- -Deploy the proxy controller with [`examples/spark/spark-ui-proxy-controller.yaml`](spark-ui-proxy-controller.yaml): - -```console -$ kubectl create -f examples/spark/spark-ui-proxy-controller.yaml -replicationcontroller "spark-ui-proxy-controller" created -``` - -We'll also need a corresponding Loadbalanced service for our Spark Proxy [`examples/spark/spark-ui-proxy-service.yaml`](spark-ui-proxy-service.yaml): - -```console -$ kubectl create -f examples/spark/spark-ui-proxy-service.yaml -service "spark-ui-proxy" created -``` - -After creating the service, you should eventually get a loadbalanced endpoint: - -```console -$ kubectl get svc spark-ui-proxy -o wide - NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR -spark-ui-proxy 10.0.51.107 aad59283284d611e6839606c214502b5-833417581.us-east-1.elb.amazonaws.com 80/TCP 9m component=spark-ui-proxy -``` - -The Spark UI in the above example output will be available at http://aad59283284d611e6839606c214502b5-833417581.us-east-1.elb.amazonaws.com - -If your Kubernetes cluster is not equipped with a Loadbalancer integration, you will need to use the [kubectl proxy](https://kubernetes.io/docs/user-guide/accessing-the-cluster.md#using-kubectl-proxy) to -connect to the Spark WebUI: - -```console -kubectl proxy --port=8001 -``` - -At which point the UI will be available at -[http://localhost:8001/api/v1/proxy/namespaces/spark-cluster/services/spark-master:8080/](http://localhost:8001/api/v1/proxy/namespaces/spark-cluster/services/spark-master:8080/). - -## Step Three: Start your Spark workers - -The Spark workers do the heavy lifting in a Spark cluster. They -provide execution resources and data cache capabilities for your -program. - -The Spark workers need the Master service to be running. - -Use the [`examples/spark/spark-worker-controller.yaml`](spark-worker-controller.yaml) file to create a -[replication controller](https://kubernetes.io/docs/user-guide/replication-controller.md) that manages the worker pods. 
- -```console -$ kubectl create -f examples/spark/spark-worker-controller.yaml -replicationcontroller "spark-worker-controller" created -``` - -### Check to see if the workers are running - -If you launched the Spark WebUI, your workers should just appear in the UI when -they're ready. (It may take a little bit to pull the images and launch the -pods.) You can also interrogate the status in the following way: - -```console -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -spark-master-controller-5u0q5 1/1 Running 0 25m -spark-worker-controller-e8otp 1/1 Running 0 6m -spark-worker-controller-fiivl 1/1 Running 0 6m -spark-worker-controller-ytc7o 1/1 Running 0 6m - -$ kubectl logs spark-master-controller-5u0q5 -[...] -15/10/26 18:20:14 INFO Master: Registering worker 10.244.1.13:53567 with 2 cores, 6.3 GB RAM -15/10/26 18:20:14 INFO Master: Registering worker 10.244.2.7:46195 with 2 cores, 6.3 GB RAM -15/10/26 18:20:14 INFO Master: Registering worker 10.244.3.8:39926 with 2 cores, 6.3 GB RAM -``` - -## Step Four: Start the Zeppelin UI to launch jobs on your Spark cluster - -The Zeppelin UI pod can be used to launch jobs into the Spark cluster either via -a web notebook frontend or the traditional Spark command line. See -[Zeppelin](https://zeppelin.incubator.apache.org/) and -[Spark architecture](https://spark.apache.org/docs/latest/cluster-overview.html) -for more details. - -Deploy Zeppelin: - -```console -$ kubectl create -f examples/spark/zeppelin-controller.yaml -replicationcontroller "zeppelin-controller" created -``` - -And the corresponding service: - -```console -$ kubectl create -f examples/spark/zeppelin-service.yaml -service "zeppelin" created -``` - -Zeppelin needs the spark-master service to be running. 
- -### Check to see if Zeppelin is running - -```console -$ kubectl get pods -l component=zeppelin -NAME READY STATUS RESTARTS AGE -zeppelin-controller-ja09s 1/1 Running 0 53s -``` - -## Step Five: Do something with the cluster - -Now you have two choices, depending on your predilections. You can do something -graphical with the Spark cluster, or you can stay in the CLI. - -For both choices, we will be working with this Python snippet: - -```python -from math import sqrt; from itertools import count, islice - -def isprime(n): - return n > 1 and all(n%i for i in islice(count(2), int(sqrt(n)-1))) - -nums = sc.parallelize(xrange(10000000)) -print nums.filter(isprime).count() -``` - -### Do something fast with pyspark! - -Simply copy and paste the python snippet into pyspark from within the zeppelin pod: - -```console -$ kubectl exec zeppelin-controller-ja09s -it pyspark -Python 2.7.9 (default, Mar 1 2015, 12:57:24) -[GCC 4.9.2] on linux2 -Type "help", "copyright", "credits" or "license" for more information. -Welcome to - ____ __ - / __/__ ___ _____/ /__ - _\ \/ _ \/ _ `/ __/ '_/ - /__ / .__/\_,_/_/ /_/\_\ version 1.5.1 - /_/ - -Using Python version 2.7.9 (default, Mar 1 2015 12:57:24) -SparkContext available as sc, HiveContext available as sqlContext. ->>> from math import sqrt; from itertools import count, islice ->>> ->>> def isprime(n): -... return n > 1 and all(n%i for i in islice(count(2), int(sqrt(n)-1))) -... ->>> nums = sc.parallelize(xrange(10000000)) - ->>> print nums.filter(isprime).count() -664579 -``` - -Congratulations, you now know how many prime numbers there are within the first 10 million numbers! - -### Do something graphical and shiny! 
- -Creating the Zeppelin service should have yielded you a Loadbalancer endpoint: - -```console -$ kubectl get svc zeppelin -o wide - NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR -zeppelin 10.0.154.1 a596f143884da11e6839506c114532b5-121893930.us-east-1.elb.amazonaws.com 80/TCP 3m component=zeppelin -``` - -If your Kubernetes cluster does not have a Loadbalancer integration, then we will have to use port forwarding. - -Take the Zeppelin pod from before and port-forward the WebUI port: - -```console -$ kubectl port-forward zeppelin-controller-ja09s 8080:8080 -``` - -This forwards `localhost` 8080 to container port 8080. You can then find -Zeppelin at [http://localhost:8080/](http://localhost:8080/). - -Once you've loaded up the Zeppelin UI, create a "New Notebook". In there we will paste our python snippet, but we need to add a `%pyspark` hint for Zeppelin to understand it: - -``` -%pyspark -from math import sqrt; from itertools import count, islice - -def isprime(n): - return n > 1 and all(n%i for i in islice(count(2), int(sqrt(n)-1))) - -nums = sc.parallelize(xrange(10000000)) -print nums.filter(isprime).count() -``` - -After pasting in our code, press shift+enter or click the play icon to the right of our snippet. The Spark job will run and once again we'll have our result! - -## Result - -You now have services and replication controllers for the Spark master, Spark -workers and Spark driver. You can take this example to the next step and start -using the Apache Spark cluster you just created, see -[Spark documentation](https://spark.apache.org/documentation.html) for more -information. - -## tl;dr - -```console -kubectl create -f examples/spark -``` - -After it's setup: - -```console -kubectl get pods # Make sure everything is running -kubectl get svc -o wide # Get the Loadbalancer endpoints for spark-ui-proxy and zeppelin -``` - -At which point the Master UI and Zeppelin will be available at the URLs under the `EXTERNAL-IP` field. 
You can also interact with the Spark cluster using the traditional `spark-shell` /
`spark-submit` / `pyspark` commands by using `kubectl exec` against the
`zeppelin-controller` pod.
- - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/spark/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/spark/README.md](https://github.com/kubernetes/examples/blob/master/staging/spark/README.md) diff --git a/examples/spark/spark-gluster/README.md b/examples/spark/spark-gluster/README.md index 348a64eb875..07d2a8d0f3f 100644 --- a/examples/spark/spark-gluster/README.md +++ b/examples/spark/spark-gluster/README.md @@ -1,123 +1 @@ -# Spark on GlusterFS example - -This guide is an extension of the standard [Spark on Kubernetes Guide](../../../examples/spark/) and describes how to run Spark on GlusterFS using the [Kubernetes Volume Plugin for GlusterFS](../../../examples/volumes/glusterfs/) - -The setup is the same in that you will setup a Spark Master Service in the same way you do with the standard Spark guide but you will deploy a modified Spark Master and a Modified Spark Worker ReplicationController, as they will be modified to use the GlusterFS volume plugin to mount a GlusterFS volume into the Spark Master and Spark Workers containers. Note that this example can be used as a guide for implementing any of the Kubernetes Volume Plugins with the Spark Example. - -[There is also a video available that provides a walkthrough for how to set this solution up](https://youtu.be/xyIaoM0-gM0) - -## Step Zero: Prerequisites - -This example assumes that you have been able to successfully get the standard Spark Example working in Kubernetes and that you have a GlusterFS cluster that is accessible from your Kubernetes cluster. It is also recommended that you are familiar with the GlusterFS Volume Plugin and how to configure it. - -## Step One: Define the endpoints for your GlusterFS Cluster - -Modify the `examples/spark/spark-gluster/glusterfs-endpoints.yaml` file to list the IP addresses of some of the servers in your GlusterFS cluster. 
The GlusterFS Volume Plugin uses these IP addresses to perform a Fuse Mount of the GlusterFS Volume into the Spark Worker Containers that are launched by the ReplicationController in the next section. - -Register your endpoints by running the following command: - -```console -$ kubectl create -f examples/spark/spark-gluster/glusterfs-endpoints.yaml -``` - -## Step Two: Modify and Submit your Spark Master ReplicationController - -Modify the `examples/spark/spark-gluster/spark-master-controller.yaml` file to reflect the GlusterFS Volume that you wish to use in the PATH parameter of the volumes subsection. - -Submit the Spark Master Pod - -```console -$ kubectl create -f examples/spark/spark-gluster/spark-master-controller.yaml -``` - -Verify that the Spark Master Pod deployed successfully. - -```console -$ kubectl get pods -``` - -Submit the Spark Master Service - -```console -$ kubectl create -f examples/spark/spark-gluster/spark-master-service.yaml -``` - -Verify that the Spark Master Service deployed successfully. - -```console -$ kubectl get services -``` - -## Step Three: Start your Spark workers - -Modify the `examples/spark/spark-gluster/spark-worker-controller.yaml` file to reflect the GlusterFS Volume that you wish to use in the PATH parameter of the Volumes subsection. - -Make sure that the replication factor for the pods is not greater than the amount of Kubernetes nodes available in your Kubernetes cluster. - -Submit your Spark Worker ReplicationController by running the following command: - -```console -$ kubectl create -f examples/spark/spark-gluster/spark-worker-controller.yaml -``` - -Verify that the Spark Worker ReplicationController deployed its pods successfully. - -```console -$ kubectl get pods -``` - -Follow the steps from the standard example to verify the Spark Worker pods have registered successfully with the Spark Master. 
The Spark Worker and Spark Master containers include a setup_client utility script that takes two parameters, the Service IP of the Spark Master and the port that it is running on. This must be done to set up the container as a Spark client prior to submitting any Spark Jobs.
/setup_client.sh 7077 -root@spark-master-controller-c1sqd:/# pyspark - -Python 2.7.9 (default, Mar 1 2015, 12:57:24) -[GCC 4.9.2] on linux2 -Type "help", "copyright", "credits" or "license" for more information. -15/06/26 14:25:28 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -Welcome to - ____ __ - / __/__ ___ _____/ /__ - _\ \/ _ \/ _ `/ __/ '_/ - /__ / .__/\_,_/_/ /_/\_\ version 1.4.0 - /_/ -Using Python version 2.7.9 (default, Mar 1 2015 12:57:24) -SparkContext available as sc, HiveContext available as sqlContext. ->>> file = sc.textFile("/mnt/glusterfs/somefile.txt") ->>> counts = file.flatMap(lambda line: line.split(" ")).map(lambda word: (word, 1)).reduceByKey(lambda a, b: a + b) ->>> counts.saveAsTextFile("/mnt/glusterfs/output") -``` - -While still in the container, you can see the output of your Spark Job in the Distributed File System by running the following: - -```console -root@spark-master-controller-c1sqd:/# ls -l /mnt/glusterfs/output -``` - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/spark/spark-gluster/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/spark/spark-gluster/README.md](https://github.com/kubernetes/examples/blob/master/staging/spark/spark-gluster/README.md) diff --git a/examples/storage/cassandra/README.md b/examples/storage/cassandra/README.md index 95f63f42ea9..5afcd19131f 100644 --- a/examples/storage/cassandra/README.md +++ b/examples/storage/cassandra/README.md @@ -1,854 +1 @@ - -# Cloud Native Deployments of Cassandra using Kubernetes - -## Table of Contents - - - [Prerequisites](#prerequisites) - - [Cassandra Docker](#cassandra-docker) - - [Quickstart](#quickstart) - - [Step 1: Create a Cassandra Headless Service](#step-1-create-a-cassandra-headless-service) - - [Step 2: Use a StatefulSet to create Cassandra 
Ring](#step-2-use-a-statefulset-to-create-cassandra-ring) - - [Step 3: Validate and Modify The Cassandra StatefulSet](#step-3-validate-and-modify-the-cassandra-statefulset) - - [Step 4: Delete Cassandra StatefulSet](#step-4-delete-cassandra-statefulset) - - [Step 5: Use a Replication Controller to create Cassandra node pods](#step-5-use-a-replication-controller-to-create-cassandra-node-pods) - - [Step 6: Scale up the Cassandra cluster](#step-6-scale-up-the-cassandra-cluster) - - [Step 7: Delete the Replication Controller](#step-7-delete-the-replication-controller) - - [Step 8: Use a DaemonSet instead of a Replication Controller](#step-8-use-a-daemonset-instead-of-a-replication-controller) - - [Step 9: Resource Cleanup](#step-9-resource-cleanup) - - [Seed Provider Source](#seed-provider-source) - -The following document describes the development of a _cloud native_ -[Cassandra](http://cassandra.apache.org/) deployment on Kubernetes. When we say -_cloud native_, we mean an application which understands that it is running -within a cluster manager, and uses this cluster management infrastructure to -help implement the application. In particular, in this instance, a custom -Cassandra `SeedProvider` is used to enable Cassandra to dynamically discover -new Cassandra nodes as they join the cluster. 
- -This example also uses some of the core components of Kubernetes: - -- [_Pods_](https://kubernetes.io/docs/user-guide/pods.md) -- [ _Services_](https://kubernetes.io/docs/user-guide/services.md) -- [_Replication Controllers_](https://kubernetes.io/docs/user-guide/replication-controller.md) -- [_Stateful Sets_](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) -- [_Daemon Sets_](https://kubernetes.io/docs/admin/daemons.md) - -## Prerequisites - -This example assumes that you have a Kubernetes version >=1.2 cluster installed and running, -and that you have installed the [`kubectl`](https://kubernetes.io/docs/user-guide/kubectl/kubectl.md) -command line tool somewhere in your path. Please see the -[getting started guides](https://kubernetes.io/docs/getting-started-guides/) -for installation instructions for your platform. - -This example also has a few code and configuration files needed. To avoid -typing these out, you can `git clone` the Kubernetes repository to your local -computer. - -## Cassandra Docker - -The pods use the [```gcr.io/google-samples/cassandra:v12```](image/Dockerfile) -image from Google's [container registry](https://cloud.google.com/container-registry/docs/). -The docker is based on `debian:jessie` and includes OpenJDK 8. This image -includes a standard Cassandra installation from the Apache Debian repo. Through the use of environment variables you are able to change values that are inserted into the `cassandra.yaml`. 
- -| ENV VAR | DEFAULT VALUE | -| ------------- |:-------------: | -| CASSANDRA_CLUSTER_NAME | 'Test Cluster' | -| CASSANDRA_NUM_TOKENS | 32 | -| CASSANDRA_RPC_ADDRESS | 0.0.0.0 | - -## Quickstart - -If you want to jump straight to the commands we will run, -here are the steps: - -```sh -# -# StatefulSet -# - -# create a service to track all cassandra statefulset nodes -kubectl create -f examples/storage/cassandra/cassandra-service.yaml - -# create a statefulset -kubectl create -f examples/storage/cassandra/cassandra-statefulset.yaml - -# validate the Cassandra cluster. Substitute the name of one of your pods. -kubectl exec -ti cassandra-0 -- nodetool status - -# cleanup -grace=$(kubectl get po cassandra-0 --template '{{.spec.terminationGracePeriodSeconds}}') \ - && kubectl delete statefulset,po -l app=cassandra \ - && echo "Sleeping $grace" \ - && sleep $grace \ - && kubectl delete pvc -l app=cassandra - -# -# Resource Controller Example -# - -# create a replication controller to replicate cassandra nodes -kubectl create -f examples/storage/cassandra/cassandra-controller.yaml - -# validate the Cassandra cluster. Substitute the name of one of your pods. -kubectl exec -ti cassandra-xxxxx -- nodetool status - -# scale up the Cassandra cluster -kubectl scale rc cassandra --replicas=4 - -# delete the replication controller -kubectl delete rc cassandra - -# -# Create a DaemonSet to place a cassandra node on each kubernetes node -# - -kubectl create -f examples/storage/cassandra/cassandra-daemonset.yaml --validate=false - -# resource cleanup -kubectl delete service -l app=cassandra -kubectl delete daemonset cassandra -``` - -## Step 1: Create a Cassandra Headless Service - -A Kubernetes _[Service](https://kubernetes.io/docs/user-guide/services.md)_ describes a set of -[_Pods_](https://kubernetes.io/docs/user-guide/pods.md) that perform the same task. 
Multiple StatefulSet features are used within this example, but they are outside the
scope of this documentation. [Please refer to the Stateful Set documentation.](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/)
- - - -```yaml -apiVersion: "apps/v1beta1" -kind: StatefulSet -metadata: - name: cassandra -spec: - serviceName: cassandra - replicas: 3 - template: - metadata: - labels: - app: cassandra - spec: - containers: - - name: cassandra - image: gcr.io/google-samples/cassandra:v12 - imagePullPolicy: Always - ports: - - containerPort: 7000 - name: intra-node - - containerPort: 7001 - name: tls-intra-node - - containerPort: 7199 - name: jmx - - containerPort: 9042 - name: cql - resources: - limits: - cpu: "500m" - memory: 1Gi - requests: - cpu: "500m" - memory: 1Gi - securityContext: - capabilities: - add: - - IPC_LOCK - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "PID=$(pidof java) && kill $PID && while ps -p $PID > /dev/null; do sleep 1; done"] - env: - - name: MAX_HEAP_SIZE - value: 512M - - name: HEAP_NEWSIZE - value: 100M - - name: CASSANDRA_SEEDS - value: "cassandra-0.cassandra.default.svc.cluster.local" - - name: CASSANDRA_CLUSTER_NAME - value: "K8Demo" - - name: CASSANDRA_DC - value: "DC1-K8Demo" - - name: CASSANDRA_RACK - value: "Rack1-K8Demo" - - name: CASSANDRA_AUTO_BOOTSTRAP - value: "false" - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - readinessProbe: - exec: - command: - - /bin/bash - - -c - - /ready-probe.sh - initialDelaySeconds: 15 - timeoutSeconds: 5 - # These volume mounts are persistent. They are like inline claims, - # but not exactly because the names need to match exactly one of - # the stateful pod volumes. - volumeMounts: - - name: cassandra-data - mountPath: /cassandra_data - # These are converted to volume claims by the controller - # and mounted at the paths mentioned above. 
Next watch the Cassandra pods deploy, one after another. The StatefulSet resource
deploys pods in numbered order: 1, 2, 3, etc. If you execute the following
command before the pods deploy you are able to see the ordered creation.
The example does not contain the entire contents of the terminal window, and
the last line of the example below is the replicas line that you want to change.
- -```console -spec: - replicas: 4 -``` - -The StatefulSet will now contain four pods. - -```console -$ kubectl get statefulset cassandra -``` - -The command should respond like: - -```console -NAME DESIRED CURRENT AGE -cassandra 4 4 36m -``` - -For the Kubernetes 1.5 release, the beta StatefulSet resource does not have `kubectl scale` -functionality, like a Deployment, ReplicaSet, Replication Controller, or Job. - -## Step 4: Delete Cassandra StatefulSet - -Deleting and/or scaling a StatefulSet down will not delete the volumes associated with the StatefulSet. This is done to ensure safety first, your data is more valuable than an auto purge of all related StatefulSet resources. Deleting the Persistent Volume Claims may result in a deletion of the associated volumes, depending on the storage class and reclaim policy. You should never assume ability to access a volume after claim deletion. - -Use the following commands to delete the StatefulSet. - -```console -$ grace=$(kubectl get po cassandra-0 --template '{{.spec.terminationGracePeriodSeconds}}') \ - && kubectl delete statefulset -l app=cassandra \ - && echo "Sleeping $grace" \ - && sleep $grace \ - && kubectl delete pvc -l app=cassandra -``` - -## Step 5: Use a Replication Controller to create Cassandra node pods - -A Kubernetes -_[Replication Controller](https://kubernetes.io/docs/user-guide/replication-controller.md)_ -is responsible for replicating sets of identical pods. Like a -Service, it has a selector query which identifies the members of its set. -Unlike a Service, it also has a desired number of replicas, and it will create -or delete Pods to ensure that the number of Pods matches up with its -desired state. - -The Replication Controller, in conjunction with the Service we just defined, -will let us easily build a replicated, scalable Cassandra cluster. - -Let's create a replication controller with two initial replicas. 
- - - -```yaml -apiVersion: v1 -kind: ReplicationController -metadata: - name: cassandra - # The labels will be applied automatically - # from the labels in the pod template, if not set - # labels: - # app: cassandra -spec: - replicas: 2 - # The selector will be applied automatically - # from the labels in the pod template, if not set. - # selector: - # app: cassandra - template: - metadata: - labels: - app: cassandra - spec: - containers: - - command: - - /run.sh - resources: - limits: - cpu: 0.5 - env: - - name: MAX_HEAP_SIZE - value: 512M - - name: HEAP_NEWSIZE - value: 100M - - name: CASSANDRA_SEED_PROVIDER - value: "io.k8s.cassandra.KubernetesSeedProvider" - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - image: gcr.io/google-samples/cassandra:v12 - name: cassandra - ports: - - containerPort: 7000 - name: intra-node - - containerPort: 7001 - name: tls-intra-node - - containerPort: 7199 - name: jmx - - containerPort: 9042 - name: cql - volumeMounts: - - mountPath: /cassandra_data - name: data - volumes: - - name: data - emptyDir: {} -``` - -[Download example](cassandra-controller.yaml?raw=true) - - -There are a few things to note in this description. - -The `selector` attribute contains the controller's selector query. It can be -explicitly specified, or applied automatically from the labels in the pod -template if not set, as is done here. - -The pod template's label, `app:cassandra`, matches the Service selector -from Step 1. This is how pods created by this replication controller are picked up -by the Service." - -The `replicas` attribute specifies the desired number of replicas, in this -case 2 initially. We'll scale up to more shortly. 
- -Create the Replication Controller: - -```console - -$ kubectl create -f examples/storage/cassandra/cassandra-controller.yaml - -``` - -You can list the new controller: - -```console - -$ kubectl get rc -o wide -NAME DESIRED CURRENT AGE CONTAINER(S) IMAGE(S) SELECTOR -cassandra 2 2 11s cassandra gcr.io/google-samples/cassandra:v12 app=cassandra - -``` - -Now if you list the pods in your cluster, and filter to the label -`app=cassandra`, you should see two Cassandra pods. (The `wide` argument lets -you see which Kubernetes nodes the pods were scheduled onto.) - -```console - -$ kubectl get pods -l="app=cassandra" -o wide -NAME READY STATUS RESTARTS AGE NODE -cassandra-21qyy 1/1 Running 0 1m kubernetes-minion-b286 -cassandra-q6sz7 1/1 Running 0 1m kubernetes-minion-9ye5 - -``` - -Because these pods have the label `app=cassandra`, they map to the service we -defined in Step 1. - -You can check that the Pods are visible to the Service using the following service endpoints query: - -```console - -$ kubectl get endpoints cassandra -o yaml -apiVersion: v1 -kind: Endpoints -metadata: - creationTimestamp: 2015-06-21T22:34:12Z - labels: - app: cassandra - name: cassandra - namespace: default - resourceVersion: "944373" - selfLink: /api/v1/namespaces/default/endpoints/cassandra - uid: a3d6c25f-1865-11e5-a34e-42010af01bcc -subsets: -- addresses: - - ip: 10.244.3.15 - targetRef: - kind: Pod - name: cassandra - namespace: default - resourceVersion: "944372" - uid: 9ef9895d-1865-11e5-a34e-42010af01bcc - ports: - - port: 9042 - protocol: TCP - -``` - -To show that the `SeedProvider` logic is working as intended, you can use the -`nodetool` command to examine the status of the Cassandra cluster. To do this, -use the `kubectl exec` command, which lets you run `nodetool` in one of your -Cassandra pods. Again, substitute `cassandra-xxxxx` with the actual name of one -of your pods. 
- -```console - -$ kubectl exec -ti cassandra-xxxxx -- nodetool status -Datacenter: datacenter1 -======================= -Status=Up/Down -|/ State=Normal/Leaving/Joining/Moving --- Address Load Tokens Owns (effective) Host ID Rack -UN 10.244.0.5 74.09 KB 256 100.0% 86feda0f-f070-4a5b-bda1-2eeb0ad08b77 rack1 -UN 10.244.3.3 51.28 KB 256 100.0% dafe3154-1d67-42e1-ac1d-78e7e80dce2b rack1 - -``` - -## Step 6: Scale up the Cassandra cluster - -Now let's scale our Cassandra cluster to 4 pods. We do this by telling the -Replication Controller that we now want 4 replicas. - -```sh - -$ kubectl scale rc cassandra --replicas=4 - -``` - -You can see the new pods listed: - -```console - -$ kubectl get pods -l="app=cassandra" -o wide -NAME READY STATUS RESTARTS AGE NODE -cassandra-21qyy 1/1 Running 0 6m kubernetes-minion-b286 -cassandra-81m2l 1/1 Running 0 47s kubernetes-minion-b286 -cassandra-8qoyp 1/1 Running 0 47s kubernetes-minion-9ye5 -cassandra-q6sz7 1/1 Running 0 6m kubernetes-minion-9ye5 - -``` - -In a few moments, you can examine the Cassandra cluster status again, and see -that the new pods have been detected by the custom `SeedProvider`: - -```console - -$ kubectl exec -ti cassandra-xxxxx -- nodetool status -Datacenter: datacenter1 -======================= -Status=Up/Down -|/ State=Normal/Leaving/Joining/Moving --- Address Load Tokens Owns (effective) Host ID Rack -UN 10.244.0.6 51.67 KB 256 48.9% d07b23a5-56a1-4b0b-952d-68ab95869163 rack1 -UN 10.244.1.5 84.71 KB 256 50.7% e060df1f-faa2-470c-923d-ca049b0f3f38 rack1 -UN 10.244.1.6 84.71 KB 256 47.0% 83ca1580-4f3c-4ec5-9b38-75036b7a297f rack1 -UN 10.244.0.5 68.2 KB 256 53.4% 72ca27e2-c72c-402a-9313-1e4b61c2f839 rack1 - -``` - -## Step 7: Delete the Replication Controller - -Before you start Step 8, __delete the replication controller__ you created above: - -```sh - -$ kubectl delete rc cassandra - -``` - -## Step 8: Use a DaemonSet instead of a Replication Controller - -In Kubernetes, a [_Daemon 
Set_](https://kubernetes.io/docs/admin/daemons.md) can distribute pods -onto Kubernetes nodes, one-to-one. Like a _ReplicationController_, it has a -selector query which identifies the members of its set. Unlike a -_ReplicationController_, it has a node selector to limit which nodes are -scheduled with the templated pods, and replicates not based on a set target -number of pods, but rather assigns a single pod to each targeted node. - -An example use case: when deploying to the cloud, the expectation is that -instances are ephemeral and might die at any time. Cassandra is built to -replicate data across the cluster to facilitate data redundancy, so that in the -case that an instance dies, the data stored on the instance does not, and the -cluster can react by re-replicating the data to other running nodes. - -`DaemonSet` is designed to place a single pod on each node in the Kubernetes -cluster. That will give us data redundancy. Let's create a -DaemonSet to start our storage cluster: - - - -```yaml -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - labels: - name: cassandra - name: cassandra -spec: - template: - metadata: - labels: - app: cassandra - spec: - # Filter to specific nodes: - # nodeSelector: - # app: cassandra - containers: - - command: - - /run.sh - env: - - name: MAX_HEAP_SIZE - value: 512M - - name: HEAP_NEWSIZE - value: 100M - - name: CASSANDRA_SEED_PROVIDER - value: "io.k8s.cassandra.KubernetesSeedProvider" - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - image: gcr.io/google-samples/cassandra:v12 - name: cassandra - ports: - - containerPort: 7000 - name: intra-node - - containerPort: 7001 - name: tls-intra-node - - containerPort: 7199 - name: jmx - - containerPort: 9042 - name: cql - # If you need it it is going away in C* 4.0 - #- containerPort: 9160 - # name: thrift - resources: - requests: - cpu: 0.5 - volumeMounts: - - mountPath: 
/cassandra_data - name: data - volumes: - - name: data - emptyDir: {} -``` - -[Download example](cassandra-daemonset.yaml?raw=true) - - -Most of this DaemonSet definition is identical to the ReplicationController -definition above; it simply gives the daemon set a recipe to use when it creates -new Cassandra pods, and targets all Cassandra nodes in the cluster. - -Differentiating aspects are the `nodeSelector` attribute, which allows the -DaemonSet to target a specific subset of nodes (you can label nodes just like -other resources), and the lack of a `replicas` attribute due to the 1-to-1 node- -pod relationship. - -Create this DaemonSet: - -```console - -$ kubectl create -f examples/storage/cassandra/cassandra-daemonset.yaml - -``` - -You may need to disable config file validation, like so: - -```console - -$ kubectl create -f examples/storage/cassandra/cassandra-daemonset.yaml --validate=false - -``` - -You can see the DaemonSet running: - -```console - -$ kubectl get daemonset -NAME DESIRED CURRENT NODE-SELECTOR -cassandra 3 3 - -``` - -Now, if you list the pods in your cluster, and filter to the label -`app=cassandra`, you should see one (and only one) new cassandra pod for each -node in your network. - -```console - -$ kubectl get pods -l="app=cassandra" -o wide -NAME READY STATUS RESTARTS AGE NODE -cassandra-ico4r 1/1 Running 0 4s kubernetes-minion-rpo1 -cassandra-kitfh 1/1 Running 0 1s kubernetes-minion-9ye5 -cassandra-tzw89 1/1 Running 0 2s kubernetes-minion-b286 - -``` - -To prove that this all worked as intended, you can again use the `nodetool` -command to examine the status of the cluster. To do this, use the `kubectl -exec` command to run `nodetool` in one of your newly-launched cassandra pods. 
- -```console - -$ kubectl exec -ti cassandra-xxxxx -- nodetool status -Datacenter: datacenter1 -======================= -Status=Up/Down -|/ State=Normal/Leaving/Joining/Moving --- Address Load Tokens Owns (effective) Host ID Rack -UN 10.244.0.5 74.09 KB 256 100.0% 86feda0f-f070-4a5b-bda1-2eeb0ad08b77 rack1 -UN 10.244.4.2 32.45 KB 256 100.0% 0b1be71a-6ffb-4895-ac3e-b9791299c141 rack1 -UN 10.244.3.3 51.28 KB 256 100.0% dafe3154-1d67-42e1-ac1d-78e7e80dce2b rack1 - -``` - -**Note**: This example had you delete the cassandra Replication Controller before -you created the DaemonSet. This is because – to keep this example simple – the -RC and the DaemonSet are using the same `app=cassandra` label (so that their pods map to the -service we created, and so that the SeedProvider can identify them). - -If we didn't delete the RC first, the two resources would conflict with -respect to how many pods they wanted to have running. If we wanted, we could support running -both together by using additional labels and selectors. - -## Step 9: Resource Cleanup - -When you are ready to take down your resources, do the following: - -```console - -$ kubectl delete service -l app=cassandra -$ kubectl delete daemonset cassandra - -``` - -### Custom Seed Provider - -A custom [`SeedProvider`](https://svn.apache.org/repos/asf/cassandra/trunk/src/java/org/apache/cassandra/locator/SeedProvider.java) -is included for running Cassandra on top of Kubernetes. Only when you deploy Cassandra -via a replication control or a daemonset, you will need to use the custom seed provider. -In Cassandra, a `SeedProvider` bootstraps the gossip protocol that Cassandra uses to find other -Cassandra nodes. Seed addresses are hosts deemed as contact points. Cassandra -instances use the seed list to find each other and learn the topology of the -ring. 
The [`KubernetesSeedProvider`](java/src/main/java/io/k8s/cassandra/KubernetesSeedProvider.java) -discovers Cassandra seed IP addresses via the Kubernetes API; those Cassandra -instances are defined within the Cassandra Service. - -Refer to the custom seed provider [README](java/README.md) for further -`KubernetesSeedProvider` configurations. For this example you should not need -to customize the Seed Provider configurations. - -See the [image](image/) directory of this example for specifics on -how the container docker image was built and what it contains. - -You may also note that we are setting some Cassandra parameters (`MAX_HEAP_SIZE` -and `HEAP_NEWSIZE`), and adding information about the -[namespace](https://kubernetes.io/docs/user-guide/namespaces.md). -We also tell Kubernetes that the container exposes -both the `CQL` and `Thrift` API ports. Finally, we tell the cluster -manager that we need 0.1 cpu (0.1 core). - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/storage/cassandra/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/cassandra/README.md](https://github.com/kubernetes/examples/blob/master/cassandra/README.md) diff --git a/examples/storage/cassandra/java/README.md b/examples/storage/cassandra/java/README.md index 45950f59de8..37894f98e0b 100644 --- a/examples/storage/cassandra/java/README.md +++ b/examples/storage/cassandra/java/README.md @@ -1,34 +1 @@ -# Cassandra on Kubernetes Custom Seed Provider: releases.k8s.io/HEAD - -Within any deployment of Cassandra a Seed Provider is used for node discovery and communication. When a Cassandra node first starts it must discover which nodes, or seeds, hold the information about the Cassandra nodes in the ring / rack / datacenter. - -This Java project provides a custom Seed Provider which communicates with the Kubernetes API to discover the required information. 
This provider is bundled with the Docker image provided in this example. - -# Configuring the Seed Provider - -The following environment variables may be used to override the default configurations: - -| ENV VAR | DEFAULT VALUE | NOTES | -| ------------- |:-------------: |:-------------:| -| KUBERNETES_PORT_443_TCP_ADDR | kubernetes.default.svc.cluster.local | The hostname of the API server | -| KUBERNETES_PORT_443_TCP_PORT | 443 | API port number | -| CASSANDRA_SERVICE | cassandra | Default service name for lookup | -| POD_NAMESPACE | default | Default pod service namespace | -| K8S_ACCOUNT_TOKEN | /var/run/secrets/kubernetes.io/serviceaccount/token | Default path to service token | - -# Using - - -If no endpoints are discovered from the API the seeds configured in the cassandra.yaml file are used. - -# Provider limitations - -This Cassandra Provider implements `SeedProvider` and utilizes `SimpleSnitch`. This limits a Cassandra Ring to a single Cassandra Datacenter and ignores Rack setup. Datastax provides more documentation on the use of [_SNITCHES_](https://docs.datastax.com/en/cassandra/3.x/cassandra/architecture/archSnitchesAbout.html). Further development is planned to -expand this capability. - -This in effect makes every node a seed provider, which is not a recommended best practice. This increases maintenance and reduces gossip performance. 
- - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/storage/cassandra/java/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/storage/cassandra/java/README.md](https://github.com/kubernetes/examples/blob/master/staging/storage/cassandra/java/README.md) diff --git a/examples/storage/hazelcast/README.md b/examples/storage/hazelcast/README.md index 8940204a14b..fd46e275734 100644 --- a/examples/storage/hazelcast/README.md +++ b/examples/storage/hazelcast/README.md @@ -1,233 +1 @@ -## Cloud Native Deployments of Hazelcast using Kubernetes - -The following document describes the development of a _cloud native_ [Hazelcast](http://hazelcast.org/) deployment on Kubernetes. When we say _cloud native_ we mean an application which understands that it is running within a cluster manager, and uses this cluster management infrastructure to help implement the application. In particular, in this instance, a custom Hazelcast ```bootstrapper``` is used to enable Hazelcast to dynamically discover Hazelcast nodes that have already joined the cluster. - -Any topology changes are communicated and handled by Hazelcast nodes themselves. - -This document also attempts to describe the core components of Kubernetes: _Pods_, _Services_, and _Deployments_. - -### Prerequisites - -This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the `kubectl` command line tool somewhere in your path. Please see the [getting started](https://kubernetes.io/docs/getting-started-guides/) for installation instructions for your platform. - -### A note for the impatient - -This is a somewhat long tutorial. If you want to jump straight to the "do it now" commands, please see the [tl; dr](#tl-dr) at the end. 
- -### Sources - -Source is freely available at: -* Hazelcast Discovery - https://github.com/pires/hazelcast-kubernetes-bootstrapper -* Dockerfile - https://github.com/pires/hazelcast-kubernetes -* Docker Trusted Build - https://quay.io/repository/pires/hazelcast-kubernetes - -### Simple Single Pod Hazelcast Node - -In Kubernetes, the atomic unit of an application is a [_Pod_](https://kubernetes.io/docs/user-guide/pods.md). A Pod is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes. - -In this case, we shall not run a single Hazelcast pod, because the discovery mechanism now relies on a service definition. - - -### Adding a Hazelcast Service - -In Kubernetes a _[Service](https://kubernetes.io/docs/user-guide/services.md)_ describes a set of Pods that perform the same task. For example, the set of nodes in a Hazelcast cluster. An important use for a Service is to create a load balancer which distributes traffic across members of the set. But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods available via the Kubernetes API. This is actually how our discovery mechanism works, by relying on the service to discover other Hazelcast pods. - -Here is the service description: - - - -```yaml -apiVersion: v1 -kind: Service -metadata: - labels: - name: hazelcast - name: hazelcast -spec: - ports: - - port: 5701 - selector: - name: hazelcast -``` - -[Download example](hazelcast-service.yaml?raw=true) - - -The important thing to note here is the `selector`. It is a query over labels, that identifies the set of _Pods_ contained by the _Service_. In this case the selector is `name: hazelcast`. If you look at the Replication Controller specification below, you'll see that the pod has the corresponding label, so it will be selected for membership in this Service. 
- -Create this service as follows: - -```sh -$ kubectl create -f examples/storage/hazelcast/hazelcast-service.yaml -``` - -### Adding replicated nodes - -The real power of Kubernetes and Hazelcast lies in easily building a replicated, resizable Hazelcast cluster. - -In Kubernetes a _[_Deployment_](https://kubernetes.io/docs/user-guide/deployments.md)_ is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of its set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with its desired state. - -Deployments will "adopt" existing pods that match their selector query, so let's create a Deployment with a single replica to adopt our existing Hazelcast Pod. - - - -```yaml -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: hazelcast - labels: - name: hazelcast -spec: - template: - metadata: - labels: - name: hazelcast - spec: - containers: - - name: hazelcast - image: quay.io/pires/hazelcast-kubernetes:0.8.0 - imagePullPolicy: Always - env: - - name: "DNS_DOMAIN" - value: "cluster.local" - ports: - - name: hazelcast - containerPort: 5701 -``` - -[Download example](hazelcast-deployment.yaml?raw=true) - - -You may note that we tell Kubernetes that the container exposes the `hazelcast` port. - -The bulk of the replication controller config is actually identical to the Hazelcast pod declaration above, it simply gives the controller a recipe to use when creating new pods. The other parts are the `selector` which contains the controller's selector query, and the `replicas` parameter which specifies the desired number of replicas, in this case 1. - -Last but not least, we set `DNS_DOMAIN` environment variable according to your Kubernetes clusters DNS configuration. 
- -Create this controller: - -```sh -$ kubectl create -f examples/storage/hazelcast/hazelcast-deployment.yaml -``` - -After the controller provisions successfully the pod, you can query the service endpoints: -```sh -$ kubectl get endpoints hazelcast -o yaml -apiVersion: v1 -kind: Endpoints -metadata: - creationTimestamp: 2017-03-15T09:40:11Z - labels: - name: hazelcast - name: hazelcast - namespace: default - resourceVersion: "65060" - selfLink: /api/v1/namespaces/default/endpoints/hazelcast - uid: 62645b71-0963-11e7-b39c-080027985ce6 -subsets: -- addresses: - - ip: 172.17.0.2 - nodeName: minikube - targetRef: - kind: Pod - name: hazelcast-4195412960-mgqtk - namespace: default - resourceVersion: "65058" - uid: 7043708f-0963-11e7-b39c-080027985ce6 - ports: - - port: 5701 - protocol: TCP - -``` - -You can see that the _Service_ has found the pod created by the replication controller. - -Now it gets even more interesting. Let's scale our cluster to 2 pods: -```sh -$ kubectl scale deployment hazelcast --replicas 2 -``` - -Now if you list the pods in your cluster, you should see two hazelcast pods: - -```sh -$ kubectl get deployment,pods -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -deploy/hazelcast 2 2 2 2 2m - -NAME READY STATUS RESTARTS AGE -po/hazelcast-4195412960-0tl3w 1/1 Running 0 7s -po/hazelcast-4195412960-mgqtk 1/1 Running 0 2m -``` - -To prove that this all works, you can use the `log` command to examine the logs of one pod, for example: - -```sh -kubectl logs -f hazelcast-4195412960-0tl3w -2017-03-15 09:42:45.046 INFO 7 --- [ main] com.github.pires.hazelcast.Application : Starting Application on hazelcast-4195412960-0tl3w with PID 7 (/bootstrapper.jar started by root in /) -2017-03-15 09:42:45.060 INFO 7 --- [ main] com.github.pires.hazelcast.Application : No active profile set, falling back to default profiles: default -2017-03-15 09:42:45.128 INFO 7 --- [ main] s.c.a.AnnotationConfigApplicationContext : Refreshing 
org.springframework.context.annotation.AnnotationConfigApplicationContext@14514713: startup date [Wed Mar 15 09:42:45 GMT 2017]; root of context hierarchy -2017-03-15 09:42:45.989 INFO 7 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Registering beans for JMX exposure on startup -2017-03-15 09:42:46.001 INFO 7 --- [ main] c.g.p.h.HazelcastDiscoveryController : Asking k8s registry at https://kubernetes.default.svc.cluster.local.. -2017-03-15 09:42:46.376 INFO 7 --- [ main] c.g.p.h.HazelcastDiscoveryController : Found 2 pods running Hazelcast. -2017-03-15 09:42:46.458 INFO 7 --- [ main] c.h.instance.DefaultAddressPicker : [LOCAL] [someGroup] [3.8] Interfaces is disabled, trying to pick one address from TCP-IP config addresses: [172.17.0.6, 172.17.0.2] -2017-03-15 09:42:46.458 INFO 7 --- [ main] c.h.instance.DefaultAddressPicker : [LOCAL] [someGroup] [3.8] Prefer IPv4 stack is true. -2017-03-15 09:42:46.464 INFO 7 --- [ main] c.h.instance.DefaultAddressPicker : [LOCAL] [someGroup] [3.8] Picked [172.17.0.6]:5701, using socket ServerSocket[addr=/0:0:0:0:0:0:0:0,localport=5701], bind any local is true -2017-03-15 09:42:46.484 INFO 7 --- [ main] com.hazelcast.system : [172.17.0.6]:5701 [someGroup] [3.8] Hazelcast 3.8 (20170217 - d7998b4) starting at [172.17.0.6]:5701 -2017-03-15 09:42:46.484 INFO 7 --- [ main] com.hazelcast.system : [172.17.0.6]:5701 [someGroup] [3.8] Copyright (c) 2008-2017, Hazelcast, Inc. All Rights Reserved. 
-2017-03-15 09:42:46.485 INFO 7 --- [ main] com.hazelcast.system : [172.17.0.6]:5701 [someGroup] [3.8] Configured Hazelcast Serialization version : 1 -2017-03-15 09:42:46.679 INFO 7 --- [ main] c.h.s.i.o.impl.BackpressureRegulator : [172.17.0.6]:5701 [someGroup] [3.8] Backpressure is disabled -2017-03-15 09:42:47.069 INFO 7 --- [ main] com.hazelcast.instance.Node : [172.17.0.6]:5701 [someGroup] [3.8] Creating TcpIpJoiner -2017-03-15 09:42:47.182 INFO 7 --- [ main] c.h.s.i.o.impl.OperationExecutorImpl : [172.17.0.6]:5701 [someGroup] [3.8] Starting 2 partition threads -2017-03-15 09:42:47.189 INFO 7 --- [ main] c.h.s.i.o.impl.OperationExecutorImpl : [172.17.0.6]:5701 [someGroup] [3.8] Starting 3 generic threads (1 dedicated for priority tasks) -2017-03-15 09:42:47.197 INFO 7 --- [ main] com.hazelcast.core.LifecycleService : [172.17.0.6]:5701 [someGroup] [3.8] [172.17.0.6]:5701 is STARTING -2017-03-15 09:42:47.253 INFO 7 --- [cached.thread-3] c.hazelcast.nio.tcp.InitConnectionTask : [172.17.0.6]:5701 [someGroup] [3.8] Connecting to /172.17.0.2:5701, timeout: 0, bind-any: true -2017-03-15 09:42:47.262 INFO 7 --- [cached.thread-3] c.h.nio.tcp.TcpIpConnectionManager : [172.17.0.6]:5701 [someGroup] [3.8] Established socket connection between /172.17.0.6:58073 and /172.17.0.2:5701 -2017-03-15 09:42:54.260 INFO 7 --- [ration.thread-0] com.hazelcast.system : [172.17.0.6]:5701 [someGroup] [3.8] Cluster version set to 3.8 -2017-03-15 09:42:54.262 INFO 7 --- [ration.thread-0] c.h.internal.cluster.ClusterService : [172.17.0.6]:5701 [someGroup] [3.8] - -Members [2] { - Member [172.17.0.2]:5701 - 170f6924-7888-442a-9875-ad4d25659a8a - Member [172.17.0.6]:5701 - b1b82bfa-86c2-4931-af57-325c10c03b3b this -} - -2017-03-15 09:42:56.285 INFO 7 --- [ main] com.hazelcast.core.LifecycleService : [172.17.0.6]:5701 [someGroup] [3.8] [172.17.0.6]:5701 is STARTED -2017-03-15 09:42:56.287 INFO 7 --- [ main] com.github.pires.hazelcast.Application : Started Application in 11.831 seconds (JVM 
running for 12.219) -``` - -Now let's scale our cluster to 4 nodes: -```sh -$ kubectl scale deployment hazelcast --replicas 4 -``` - -Examine the status again by checking a node's logs and you should see the 4 members connected. Something like: -``` -(...) - -Members [4] { - Member [172.17.0.2]:5701 - 170f6924-7888-442a-9875-ad4d25659a8a - Member [172.17.0.6]:5701 - b1b82bfa-86c2-4931-af57-325c10c03b3b this - Member [172.17.0.9]:5701 - 0c7530d3-1b5a-4f40-bd59-7187e43c1110 - Member [172.17.0.10]:5701 - ad5c3000-7fd0-4ce7-8194-e9b1c2ed6dda -} -``` - -### tl; dr; - -For those of you who are impatient, here is the summary of the commands we ran in this tutorial. - -```sh -kubectl create -f service.yaml -kubectl create -f deployment.yaml -kubectl scale deployment hazelcast --replicas 2 -kubectl scale deployment hazelcast --replicas 4 -``` - -### Hazelcast Discovery Source - -See [here](https://github.com/pires/hazelcast-kubernetes-bootstrapper/blob/master/src/main/java/com/github/pires/hazelcast/HazelcastDiscoveryController.java) - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/storage/hazelcast/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/storage/hazelcast/README.md](https://github.com/kubernetes/examples/blob/master/staging/storage/hazelcast/README.md) diff --git a/examples/storage/minio/README.md b/examples/storage/minio/README.md index c2c36b3355c..43b3a6e6b98 100644 --- a/examples/storage/minio/README.md +++ b/examples/storage/minio/README.md @@ -1,341 +1 @@ -# Cloud Native Deployment of Minio using Kubernetes - -## Table of Contents - -- [Introduction](#introduction) -- [Prerequisites](#prerequisites) -- [Minio Standalone Server Deployment](#minio-standalone-server-deployment) - - [Standalone Quickstart](#standalone-quickstart) - - [Step 1: Create Persistent Volume Claim](#step-1-create-persistent-volume-claim) - - [Step 2: Create 
Deployment](#step-2-create-minio-deployment) - - [Step 3: Create LoadBalancer Service](#step-3-create-minio-service) - - [Step 4: Resource cleanup](#step-4-resource-cleanup) -- [Minio Distributed Server Deployment](#minio-distributed-server-deployment) - - [Distributed Quickstart](#distributed-quickstart) - - [Step 1: Create Minio Headless Service](#step-1-create-minio-headless-service) - - [Step 2: Create Minio Statefulset](#step-2-create-minio-statefulset) - - [Step 3: Create LoadBalancer Service](#step-3-create-minio-service) - - [Step 4: Resource cleanup](#step-4-resource-cleanup) - -## Introduction -Minio is an AWS S3 compatible, object storage server built for cloud applications and devops. Minio is _cloud native_, meaning Minio understands that it is running within a cluster manager, and uses the cluster management infrastructure for allocation of compute and storage resources. - -## Prerequisites - -This example assumes that you have a Kubernetes version >=1.4 cluster installed and running, and that you have installed the [`kubectl`](https://kubernetes.io/docs/tasks/kubectl/install/) command line tool in your path. Please see the -[getting started guides](https://kubernetes.io/docs/getting-started-guides/) for installation instructions for your platform. - -## Minio Standalone Server Deployment - -The following section describes the process to deploy standalone [Minio](https://minio.io/) server on Kubernetes. The deployment uses the [official Minio Docker image](https://hub.docker.com/r/minio/minio/~/dockerfile/) from Docker Hub. 
- -This section uses following core components of Kubernetes: - -- [_Pods_](https://kubernetes.io/docs/user-guide/pods/) -- [_Services_](https://kubernetes.io/docs/user-guide/services/) -- [_Deployments_](https://kubernetes.io/docs/user-guide/deployments/) -- [_Persistent Volume Claims_](https://kubernetes.io/docs/user-guide/persistent-volumes/#persistentvolumeclaims) - -### Standalone Quickstart - -Run the below commands to get started quickly - -```sh -kubectl create -f https://github.com/kubernetes/kubernetes/blob/master/examples/storage/minio/minio-standalone-pvc.yaml?raw=true -kubectl create -f https://github.com/kubernetes/kubernetes/blob/master/examples/storage/minio/minio-standalone-deployment.yaml?raw=true -kubectl create -f https://github.com/kubernetes/kubernetes/blob/master/examples/storage/minio/minio-standalone-service.yaml?raw=true -``` - -### Step 1: Create Persistent Volume Claim - -Minio needs persistent storage to store objects. If there is no -persistent storage, the data stored in Minio instance will be stored in the container file system and will be wiped off as soon as the container restarts. - -Create a persistent volume claim (PVC) to request storage for the Minio instance. Kubernetes looks out for PVs matching the PVC request in the cluster and binds it to the PVC automatically. - -This is the PVC description. - -```sh -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - # This name uniquely identifies the PVC. Will be used in deployment below. - name: minio-pv-claim - annotations: - volume.alpha.kubernetes.io/storage-class: anything - labels: - app: minio-storage-claim -spec: - # Read more about access modes here: http://kubernetes.io/docs/user-guide/persistent-volumes/#access-modes - accessModes: - - ReadWriteOnce - resources: - # This is the request for storage. Should be available in the cluster. 
- requests: - storage: 10Gi -``` - -Create the PersistentVolumeClaim - -```sh -kubectl create -f https://github.com/kubernetes/kubernetes/blob/master/examples/storage/minio/minio-standalone-pvc.yaml?raw=true -persistentvolumeclaim "minio-pv-claim" created -``` - -### Step 2: Create Minio Deployment - -A deployment encapsulates replica sets and pods — so, if a pod goes down, replication controller makes sure another pod comes up automatically. This way you won’t need to bother about pod failures and will have a stable Minio service available. - -This is the deployment description. - -```sh -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - # This name uniquely identifies the Deployment - name: minio-deployment -spec: - strategy: - type: Recreate - template: - metadata: - labels: - # Label is used as selector in the service. - app: minio - spec: - # Refer to the PVC created earlier - volumes: - - name: storage - persistentVolumeClaim: - # Name of the PVC created earlier - claimName: minio-pv-claim - containers: - - name: minio - # Pulls the default Minio image from Docker Hub - image: minio/minio:latest - args: - - server - - /storage - env: - # Minio access key and secret key - - name: MINIO_ACCESS_KEY - value: "minio" - - name: MINIO_SECRET_KEY - value: "minio123" - ports: - - containerPort: 9000 - hostPort: 9000 - # Mount the volume into the pod - volumeMounts: - - name: storage # must match the volume name, above - mountPath: "/storage" -``` - -Create the Deployment - -```sh -kubectl create -f https://github.com/kubernetes/kubernetes/blob/master/examples/storage/minio/minio-standalone-deployment.yaml?raw=true -deployment "minio-deployment" created -``` - -### Step 3: Create Minio Service - -Now that you have a Minio deployment running, you may either want to access it internally (within the cluster) or expose it as a Service onto an external (outside of your cluster, maybe public internet) IP address, depending on your use case. 
You can achieve this using Services. There are 3 major service types — default type is ClusterIP, which exposes a service to connection from inside the cluster. NodePort and LoadBalancer are two types that expose services to external traffic. - -In this example, we expose the Minio Deployment by creating a LoadBalancer service. This is the service description. - -```sh -apiVersion: v1 -kind: Service -metadata: - name: minio-service -spec: - type: LoadBalancer - ports: - - port: 9000 - targetPort: 9000 - protocol: TCP - selector: - app: minio -``` -Create the Minio service - -```sh -kubectl create -f https://github.com/kubernetes/kubernetes/blob/master/examples/storage/minio/minio-standalone-service.yaml?raw=true -service "minio-service" created -``` - -The `LoadBalancer` service takes couple of minutes to launch. To check if the service was created successfully, run the command - -```sh -kubectl get svc minio-service -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -minio-service 10.55.248.23 104.199.249.165 9000:31852/TCP 1m -``` - -### Step 4: Resource cleanup - -Once you are done, cleanup the cluster using -```sh -kubectl delete deployment minio-deployment \ -&& kubectl delete pvc minio-pv-claim \ -&& kubectl delete svc minio-service -``` - -## Minio Distributed Server Deployment - -The following document describes the process to deploy [distributed Minio](https://docs.minio.io/docs/distributed-minio-quickstart-guide) server on Kubernetes. This example uses the [official Minio Docker image](https://hub.docker.com/r/minio/minio/~/dockerfile/) from Docker Hub. 
- -This example uses following core components of Kubernetes: - -- [_Pods_](https://kubernetes.io/docs/concepts/workloads/pods/pod/) -- [_Services_](https://kubernetes.io/docs/concepts/services-networking/service/) -- [_Statefulsets_](https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/) - -### Distributed Quickstart - -Run the below commands to get started quickly - -```sh -kubectl create -f https://github.com/kubernetes/kubernetes/blob/master/examples/storage/minio/minio-distributed-headless-service.yaml?raw=true -kubectl create -f https://github.com/kubernetes/kubernetes/blob/master/examples/storage/minio/minio-distributed-statefulset.yaml?raw=true -kubectl create -f https://github.com/kubernetes/kubernetes/blob/master/examples/storage/minio/minio-distributed-service.yaml?raw=true -``` - -### Step 1: Create Minio Headless Service - -Headless Service controls the domain within which StatefulSets are created. The domain managed by this Service takes the form: `$(service name).$(namespace).svc.cluster.local` (where “cluster.local” is the cluster domain), and the pods in this domain take the form: `$(pod-name-{i}).$(service name).$(namespace).svc.cluster.local`. This is required to get a DNS resolvable URL for each of the pods created within the Statefulset. - -This is the Headless service description. - -```sh -apiVersion: v1 -kind: Service -metadata: - name: minio - labels: - app: minio -spec: - clusterIP: None - ports: - - port: 9000 - name: minio - selector: - app: minio -``` - -Create the Headless Service - -```sh -$ kubectl create -f https://github.com/kubernetes/kubernetes/blob/master/examples/storage/minio/minio-distributed-headless-service.yaml?raw=true -service "minio" created -``` - -### Step 2: Create Minio Statefulset - -A StatefulSet provides a deterministic name and a unique identity to each pod, making it easy to deploy stateful distributed applications. 
To launch distributed Minio you need to pass drive locations as parameters to the minio server command. Then, you’ll need to run the same command on all the participating pods. StatefulSets offer a perfect way to handle this requirement. - -This is the Statefulset description. - -```sh -apiVersion: apps/v1beta1 -kind: StatefulSet -metadata: - name: minio -spec: - serviceName: minio - replicas: 4 - template: - metadata: - annotations: - pod.alpha.kubernetes.io/initialized: "true" - labels: - app: minio - spec: - containers: - - name: minio - env: - - name: MINIO_ACCESS_KEY - value: "minio" - - name: MINIO_SECRET_KEY - value: "minio123" - image: minio/minio:latest - args: - - server - - http://minio-0.minio.default.svc.cluster.local/data - - http://minio-1.minio.default.svc.cluster.local/data - - http://minio-2.minio.default.svc.cluster.local/data - - http://minio-3.minio.default.svc.cluster.local/data - ports: - - containerPort: 9000 - hostPort: 9000 - # These volume mounts are persistent. Each pod in the Statefulset - # gets a volume mounted based on this field. - volumeMounts: - - name: data - mountPath: /data - # These are converted to volume claims by the controller - # and mounted at the paths mentioned above. - volumeClaimTemplates: - - metadata: - name: data - annotations: - volume.alpha.kubernetes.io/storage-class: anything - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 10Gi -``` - -Create the Statefulset - -```sh -$ kubectl create -f https://github.com/kubernetes/kubernetes/blob/master/examples/storage/minio/minio-distributed-statefulset.yaml?raw=true -statefulset "minio" created -``` - -### Step 3: Create Minio Service - -Now that you have a Minio statefulset running, you may either want to access it internally (within the cluster) or expose it as a Service onto an external (outside of your cluster, maybe public internet) IP address, depending on your use case. You can achieve this using Services. 
There are 3 major service types — default type is ClusterIP, which exposes a service to connection from inside the cluster. NodePort and LoadBalancer are two types that expose services to external traffic. - -In this example, we expose the Minio Deployment by creating a LoadBalancer service. This is the service description. - -```sh -apiVersion: v1 -kind: Service -metadata: - name: minio-service -spec: - type: LoadBalancer - ports: - - port: 9000 - targetPort: 9000 - protocol: TCP - selector: - app: minio -``` -Create the Minio service - -```sh -$ kubectl create -f https://github.com/kubernetes/kubernetes/blob/master/examples/storage/minio/minio-distributed-service.yaml?raw=true -service "minio-service" created -``` - -The `LoadBalancer` service takes couple of minutes to launch. To check if the service was created successfully, run the command - -```sh -$ kubectl get svc minio-service -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -minio-service 10.55.248.23 104.199.249.165 9000:31852/TCP 1m -``` - -### Step 4: Resource cleanup - -You can cleanup the cluster using -```sh -kubectl delete statefulset minio \ -&& kubectl delete svc minio \ -&& kubectl delete svc minio-service -``` +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/storage/minio/README.md](https://github.com/kubernetes/examples/blob/master/staging/storage/minio/README.md) diff --git a/examples/storage/mysql-galera/README.md b/examples/storage/mysql-galera/README.md index 02846ea477f..1930bad244a 100644 --- a/examples/storage/mysql-galera/README.md +++ b/examples/storage/mysql-galera/README.md @@ -1,137 +1 @@ -## Galera Replication for MySQL on Kubernetes - -This document explains a simple demonstration example of running MySQL synchronous replication using Galera, specifically, Percona XtraDB cluster. The example is simplistic and used a fixed number (3) of nodes but the idea can be built upon and made more dynamic as Kubernetes matures. 
- -### Prerequisites - -This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the ```kubectl``` command line tool somewhere in your path. Please see the [getting started](https://kubernetes.io/docs/getting-started-guides/) for installation instructions for your platform. - -Also, this example requires the image found in the ```image``` directory. For your convenience, it is built and available on Docker's public image repository as ```capttofu/percona_xtradb_cluster_5_6```. It can also be built which would merely require that the image in the pod or replication controller files is updated. - -This example was tested on OS X with a Galera cluster running on VMWare using the fine repo developed by Paulo Pires [https://github.com/pires/kubernetes-vagrant-coreos-cluster] and client programs built for OS X. - -### Basic concept - -The basic idea is this: three replication controllers with a single pod, corresponding services, and a single overall service to connect to all three nodes. One of the important design goals of MySQL replication and/or clustering is that you don't want a single-point-of-failure, hence the need to distribute each node or slave across hosts or even geographical locations. Kubernetes is well-suited for facilitating this design pattern using the service and replication controller configuration files in this example. - -By defaults, there are only three pods (hence replication controllers) for this cluster. This number can be increased using the variable NUM_NODES, specified in the replication controller configuration file. It's important to know the number of nodes must always be odd. - -When the replication controller is created, it results in the corresponding container to start, run an entrypoint script that installs the MySQL system tables, set up users, and build up a list of servers that is used with the galera parameter ```wsrep_cluster_address```. 
This is a list of running nodes that galera uses for election of a node to obtain SST (Single State Transfer) from. - -Note: Kubernetes best-practices is to pre-create the services for each controller, and the configuration files which contain the service and replication controller for each node, when created, will result in both a service and replication contrller running for the given node. An important thing to know is that it's important that initially pxc-node1.yaml be processed first and no other pxc-nodeN services that don't have corresponding replication controllers should exist. The reason for this is that if there is a node in ```wsrep_clsuter_address``` without a backing galera node there will be nothing to obtain SST from which will cause the node to shut itself down and the container in question to exit (and another soon relaunched, repeatedly). - -First, create the overall cluster service that will be used to connect to the cluster: - -```kubectl create -f examples/storage/mysql-galera/pxc-cluster-service.yaml``` - -Create the service and replication controller for the first node: - -```kubectl create -f examples/storage/mysql-galera/pxc-node1.yaml``` - -### Create services and controllers for the remaining nodes - -Repeat the same previous steps for ```pxc-node2``` and ```pxc-node3``` - -When complete, you should be able connect with a MySQL client to the IP address - service ```pxc-cluster``` to find a working cluster - -### An example of creating a cluster - -Shown below are examples of Using ```kubectl``` from within the ```./examples/storage/mysql-galera``` directory, the status of the lauched replication controllers and services can be confirmed - -``` -$ kubectl create -f examples/storage/mysql-galera/pxc-cluster-service.yaml -services/pxc-cluster - -$ kubectl create -f examples/storage/mysql-galera/pxc-node1.yaml -services/pxc-node1 -replicationcontrollers/pxc-node1 - -$ kubectl create -f examples/storage/mysql-galera/pxc-node2.yaml 
-services/pxc-node2 -replicationcontrollers/pxc-node2 - -$ kubectl create -f examples/storage/mysql-galera/pxc-node3.yaml -services/pxc-node3 -replicationcontrollers/pxc-node3 - -``` - -### Confirm a running cluster - -Verify everything is running: - -``` -$ kubectl get rc,pods,services -CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS -pxc-node1 pxc-node1 capttofu/percona_xtradb_cluster_5_6:beta name=pxc-node1 1 -pxc-node2 pxc-node2 capttofu/percona_xtradb_cluster_5_6:beta name=pxc-node2 1 -pxc-node3 pxc-node3 capttofu/percona_xtradb_cluster_5_6:beta name=pxc-node3 1 -NAME READY STATUS RESTARTS AGE -pxc-node1-h6fqr 1/1 Running 0 41m -pxc-node2-sfqm6 1/1 Running 0 41m -pxc-node3-017b3 1/1 Running 0 40m -NAME LABELS SELECTOR IP(S) PORT(S) -pxc-cluster unit=pxc-cluster 10.100.179.58 3306/TCP -pxc-node1 name=pxc-node1 10.100.217.202 3306/TCP - 4444/TCP - 4567/TCP - 4568/TCP -pxc-node2 name=pxc-node2 10.100.47.212 3306/TCP - 4444/TCP - 4567/TCP - 4568/TCP -pxc-node3 name=pxc-node3 10.100.200.14 3306/TCP - 4444/TCP - 4567/TCP - 4568/TCP - -``` - -The cluster should be ready for use! - -### Connecting to the cluster - -Using the name of ```pxc-cluster``` service running interactively using ```kubernetes exec```, it is possible to connect to any of the pods using the mysql client on the pod's container to verify the cluster size, which should be ```3```. In this example below, pxc-node3 replication controller is chosen, and to find out the pod name, ```kubectl get pods``` and ```awk``` are employed: - -``` -$ kubectl get pods|grep pxc-node3|awk '{ print $1 }' -pxc-node3-0b5mc - -$ kubectl exec pxc-node3-0b5mc -i -t -- mysql -u root -p -h pxc-cluster - -Enter password: -Welcome to the MySQL monitor. Commands end with ; or \g. 
-Your MySQL connection id is 5 -Server version: 5.6.24-72.2-56-log Percona XtraDB Cluster (GPL), Release rel72.2, Revision 43abf03, WSREP version 25.11, wsrep_25.11 - -Copyright (c) 2009-2015 Percona LLC and/or its affiliates -Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. - -Oracle is a registered trademark of Oracle Corporation and/or its -affiliates. Other names may be trademarks of their respective -owners. - -Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. - -mysql> show status like 'wsrep_cluster_size'; -+--------------------+-------+ -| Variable_name | Value | -+--------------------+-------+ -| wsrep_cluster_size | 3 | -+--------------------+-------+ -1 row in set (0.06 sec) - -``` - -At this point, there is a working cluster that can begin being used via the pxc-cluster service IP address! - -### TODO - -This setup certainly can become more fluid and dynamic. One idea is to perhaps use an etcd container to store information about node state. Originally, there was a read-only kubernetes API available to each container but that has since been removed. Also, Kelsey Hightower is working on moving the functionality of confd to Kubernetes. This could replace the shell duct tape that builds the cluster configuration file for the image. 
- - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/storage/mysql-galera/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/storage/mysql-galera/README.md](https://github.com/kubernetes/examples/blob/master/staging/storage/mysql-galera/README.md) diff --git a/examples/storage/redis/README.md b/examples/storage/redis/README.md index 04a0e802e09..c24b9702f51 100644 --- a/examples/storage/redis/README.md +++ b/examples/storage/redis/README.md @@ -1,133 +1 @@ -## Reliable, Scalable Redis on Kubernetes - -The following document describes the deployment of a reliable, multi-node Redis on Kubernetes. It deploys a master with replicated slaves, as well as replicated redis sentinels which are use for health checking and failover. - -### Prerequisites - -This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the ```kubectl``` command line tool somewhere in your path. Please see the [getting started](https://kubernetes.io/docs/getting-started-guides/) for installation instructions for your platform. - -### A note for the impatient - -This is a somewhat long tutorial. If you want to jump straight to the "do it now" commands, please see the [tl; dr](#tl-dr) at the end. - -### Turning up an initial master/sentinel pod. - -A [_Pod_](https://kubernetes.io/docs/user-guide/pods.md) is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes. - -We will use the shared network namespace to bootstrap our Redis cluster. In particular, the very first sentinel needs to know how to find the master (subsequent sentinels just ask the first sentinel). Because all containers in a Pod share a network namespace, the sentinel can simply look at ```$(hostname -i):6379```. 
- -Here is the config for the initial master and sentinel pod: [redis-master.yaml](redis-master.yaml) - - -Create this master as follows: - -```sh -kubectl create -f examples/storage/redis/redis-master.yaml -``` - -### Turning up a sentinel service - -In Kubernetes a [_Service_](https://kubernetes.io/docs/user-guide/services.md) describes a set of Pods that perform the same task. For example, the set of nodes in a Cassandra cluster, or even the single node we created above. An important use for a Service is to create a load balancer which distributes traffic across members of the set. But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods (or the single Pod we've already created) available via the Kubernetes API. - -In Redis, we will use a Kubernetes Service to provide a discoverable endpoints for the Redis sentinels in the cluster. From the sentinels Redis clients can find the master, and then the slaves and other relevant info for the cluster. This enables new members to join the cluster when failures occur. - -Here is the definition of the sentinel service: [redis-sentinel-service.yaml](redis-sentinel-service.yaml) - -Create this service: - -```sh -kubectl create -f examples/storage/redis/redis-sentinel-service.yaml -``` - -### Turning up replicated redis servers - -So far, what we have done is pretty manual, and not very fault-tolerant. If the ```redis-master``` pod that we previously created is destroyed for some reason (e.g. a machine dying) our Redis service goes away with it. - -In Kubernetes a [_Replication Controller_](https://kubernetes.io/docs/user-guide/replication-controller.md) is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of it's set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with it's desired state. 
- -Replication Controllers will "adopt" existing pods that match their selector query, so let's create a Replication Controller with a single replica to adopt our existing Redis server. Here is the replication controller config: [redis-controller.yaml](redis-controller.yaml) - -The bulk of this controller config is actually identical to the redis-master pod definition above. It forms the template or "cookie cutter" that defines what it means to be a member of this set. - -Create this controller: - -```sh -kubectl create -f examples/storage/redis/redis-controller.yaml -``` - -We'll do the same thing for the sentinel. Here is the controller config: [redis-sentinel-controller.yaml](redis-sentinel-controller.yaml) - -We create it as follows: - -```sh -kubectl create -f examples/storage/redis/redis-sentinel-controller.yaml -``` - -### Scale our replicated pods - -Initially creating those pods didn't actually do anything, since we only asked for one sentinel and one redis server, and they already existed, nothing changed. Now we will add more replicas: - -```sh -kubectl scale rc redis --replicas=3 -``` - -```sh -kubectl scale rc redis-sentinel --replicas=3 -``` - -This will create two additional replicas of the redis server and two additional replicas of the redis sentinel. - -Unlike our original redis-master pod, these pods exist independently, and they use the ```redis-sentinel-service``` that we defined above to discover and join the cluster. - -### Delete our manual pod - -The final step in the cluster turn up is to delete the original redis-master pod that we created manually. While it was useful for bootstrapping discovery in the cluster, we really don't want the lifespan of our sentinel to be tied to the lifespan of one of our redis servers, and now that we have a successful, replicated redis sentinel service up and running, the binding is unnecessary. 
- -Delete the master as follows: - -```sh -kubectl delete pods redis-master -``` - -Now let's take a close look at what happens after this pod is deleted. There are three things that happen: - - 1. The redis replication controller notices that its desired state is 3 replicas, but there are currently only 2 replicas, and so it creates a new redis server to bring the replica count back up to 3 - 2. The redis-sentinel replication controller likewise notices the missing sentinel, and also creates a new sentinel. - 3. The redis sentinels themselves, realize that the master has disappeared from the cluster, and begin the election procedure for selecting a new master. They perform this election and selection, and chose one of the existing redis server replicas to be the new master. - -### Conclusion - -At this point we now have a reliable, scalable Redis installation. By scaling the replication controller for redis servers, we can increase or decrease the number of read-slaves in our cluster. Likewise, if failures occur, the redis-sentinels will perform master election and select a new master. - -**NOTE:** since redis 3.2 some security measures (bind to 127.0.0.1 and `--protected-mode`) are enabled by default. 
Please read about this in http://antirez.com/news/96 - - -### tl; dr - -For those of you who are impatient, here is the summary of commands we ran in this tutorial: - -``` -# Create a bootstrap master -kubectl create -f examples/storage/redis/redis-master.yaml - -# Create a service to track the sentinels -kubectl create -f examples/storage/redis/redis-sentinel-service.yaml - -# Create a replication controller for redis servers -kubectl create -f examples/storage/redis/redis-controller.yaml - -# Create a replication controller for redis sentinels -kubectl create -f examples/storage/redis/redis-sentinel-controller.yaml - -# Scale both replication controllers -kubectl scale rc redis --replicas=3 -kubectl scale rc redis-sentinel --replicas=3 - -# Delete the original master pod -kubectl delete pods redis-master -``` - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/storage/redis/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/storage/redis/README.md](https://github.com/kubernetes/examples/blob/master/staging/storage/redis/README.md) diff --git a/examples/storage/rethinkdb/README.md b/examples/storage/rethinkdb/README.md index 061854c4c64..f15a56838de 100644 --- a/examples/storage/rethinkdb/README.md +++ b/examples/storage/rethinkdb/README.md @@ -1,130 +1 @@ -RethinkDB Cluster on Kubernetes -============================== - -Setting up a [rethinkdb](http://rethinkdb.com/) cluster on [kubernetes](http://kubernetes.io) - -**Features** - - * Auto configuration cluster by querying info from k8s - * Simple - -Quick start ------------ - -**Step 1** - -Rethinkdb will discover its peer using endpoints provided by kubernetes service, -so first create a service so the following pod can query its endpoint - -```sh -$kubectl create -f examples/storage/rethinkdb/driver-service.yaml -``` - -check out: - -```sh -$kubectl get services -NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE 
-rethinkdb-driver 10.0.27.114 28015/TCP db=rethinkdb 10m -[...] -``` - -**Step 2** - -start the first server in the cluster - -```sh -$kubectl create -f examples/storage/rethinkdb/rc.yaml -``` - -Actually, you can start servers as many as you want at one time, just modify the `replicas` in `rc.ymal` - -check out again: - -```sh -$kubectl get pods -NAME READY REASON RESTARTS AGE -[...] -rethinkdb-rc-r4tb0 1/1 Running 0 1m -``` - -**Done!** - - ---- - -Scale ------ - -You can scale up your cluster using `kubectl scale`. The new pod will join to the existing cluster automatically, for example - - -```sh -$kubectl scale rc rethinkdb-rc --replicas=3 -scaled - -$kubectl get pods -NAME READY REASON RESTARTS AGE -[...] -rethinkdb-rc-f32c5 1/1 Running 0 1m -rethinkdb-rc-m4d50 1/1 Running 0 1m -rethinkdb-rc-r4tb0 1/1 Running 0 3m -``` - -Admin ------ - -You need a separate pod (labeled as role:admin) to access Web Admin UI - -```sh -kubectl create -f examples/storage/rethinkdb/admin-pod.yaml -kubectl create -f examples/storage/rethinkdb/admin-service.yaml -``` - -find the service - -```console -$kubectl get services -NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE -[...] -rethinkdb-admin 10.0.131.19 104.197.19.120 8080/TCP db=rethinkdb,role=admin 10m -rethinkdb-driver 10.0.27.114 28015/TCP db=rethinkdb 20m -``` - -We request an external load balancer in the [admin-service.yaml](admin-service.yaml) file: - -``` -type: LoadBalancer -``` - -The external load balancer allows us to access the service from outside the firewall via an external IP, 104.197.19.120 in this case. - -Note that you may need to create a firewall rule to allow the traffic, assuming you are using Google Compute Engine: - -```console -$ gcloud compute firewall-rules create rethinkdb --allow=tcp:8080 -``` - -Now you can open a web browser and access to *http://104.197.19.120:8080* to manage your cluster. 
- - - -**Why not just using pods in replicas?** - -This is because kube-proxy will act as a load balancer and send your traffic to different server, -since the ui is not stateless when playing with Web Admin UI will cause `Connection not open on server` error. - - -- - - - -**BTW** - - * `gen_pod.sh` is using to generate pod templates for my local cluster, -the generated pods which is using `nodeSelector` to force k8s to schedule containers to my designate nodes, for I need to access persistent data on my host dirs. Note that one needs to label the node before 'nodeSelector' can work, see this [tutorial](https://kubernetes.io/docs/user-guide/node-selection/) - - * see [antmanler/rethinkdb-k8s](https://github.com/antmanler/rethinkdb-k8s) for detail - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/storage/rethinkdb/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/storage/rethinkdb/README.md](https://github.com/kubernetes/examples/blob/master/staging/storage/rethinkdb/README.md) diff --git a/examples/storage/vitess/README.md b/examples/storage/vitess/README.md index dd48248ea82..6b350d8e511 100644 --- a/examples/storage/vitess/README.md +++ b/examples/storage/vitess/README.md @@ -1,113 +1 @@ -## Vitess Example - -This example shows how to run a [Vitess](http://vitess.io) cluster in Kubernetes. -Vitess is a MySQL clustering system developed at YouTube that makes sharding -transparent to the application layer. It also makes scaling MySQL within -Kubernetes as simple as launching more pods. - -The example brings up a database with 2 shards, and then runs a pool of -[sharded guestbook](https://github.com/youtube/vitess/tree/master/examples/kubernetes/guestbook) -pods. The guestbook app was ported from the original -[guestbook](../../../examples/guestbook-go/) -example found elsewhere in this tree, modified to use Vitess as the backend. 
- -For a more detailed, step-by-step explanation of this example setup, see the -[Vitess on Kubernetes](http://vitess.io/getting-started/) guide. - -### Prerequisites - -You'll need to install [Go 1.4+](https://golang.org/doc/install) to build -`vtctlclient`, the command-line admin tool for Vitess. - -We also assume you have a running Kubernetes cluster with `kubectl` pointing to -it by default. See the [Getting Started guides](https://kubernetes.io/docs/getting-started-guides/) -for how to get to that point. Note that your Kubernetes cluster needs to have -enough resources (CPU+RAM) to schedule all the pods. By default, this example -requires a cluster-wide total of at least 6 virtual CPUs and 10GiB RAM. You can -tune these requirements in the -[resource limits](https://kubernetes.io/docs/user-guide/compute-resources.md) -section of each YAML file. - -Lastly, you need to open ports 30000-30001 (for the Vitess admin daemon) and 80 (for -the guestbook app) in your firewall. See the -[Services and Firewalls](https://kubernetes.io/docs/user-guide/services-firewalls.md) -guide for examples of how to do that. - -### Configure site-local settings - -Run the `configure.sh` script to generate a `config.sh` file, which will be used -to customize your cluster settings. - -``` console -./configure.sh -``` - -Currently, we have out-of-the-box support for storing -[backups](http://vitess.io/user-guide/backup-and-restore.html) in -[Google Cloud Storage](https://cloud.google.com/storage/). -If you're using GCS, fill in the fields requested by the configure script. -Note that your Kubernetes cluster must be running on instances with the -`storage-rw` scope for this to work. With Container Engine, you can do this by -passing `--scopes storage-rw` to the `glcoud container clusters create` command. - -For other platforms, you'll need to choose the `file` backup storage plugin, -and mount a read-write network volume into the `vttablet` and `vtctld` pods. 
-For example, you can mount any storage service accessible through NFS into a -Kubernetes volume. Then provide the mount path to the configure script here. - -If you prefer to skip setting up a backup volume for the purpose of this example, -you can choose `file` mode and set the path to `/tmp`. - -### Start Vitess - -``` console -./vitess-up.sh -``` - -This will run through the steps to bring up Vitess. At the end, you should see -something like this: - -``` console -**************************** -* Complete! -* Use the following line to make an alias to kvtctl: -* alias kvtctl='$GOPATH/bin/vtctlclient -server 104.197.47.173:30001' -* See the vtctld UI at: http://104.197.47.173:30000 -**************************** -``` - -### Start the Guestbook app - -``` console -./guestbook-up.sh -``` - -The guestbook service is configured with `type: LoadBalancer` to tell Kubernetes -to expose it on an external IP. It may take a minute to set up, but you should -soon see the external IP show up under the internal one like this: - -``` console -$ kubectl get service guestbook -NAME LABELS SELECTOR IP(S) PORT(S) -guestbook name=guestbook 10.67.253.173 80/TCP - 104.197.151.132 -``` - -Visit the external IP in your browser to view the guestbook. Note that in this -modified guestbook, there are multiple pages to demonstrate range-based sharding -in Vitess. Each page number is assigned to one of the shards using a -[consistent hashing](https://en.wikipedia.org/wiki/Consistent_hashing) scheme. - -### Tear down - -``` console -./guestbook-down.sh -./vitess-down.sh -``` - -You may also want to remove any firewall rules you created. 
- - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/storage/vitess/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/storage/vitess/README.md](https://github.com/kubernetes/examples/blob/master/staging/storage/vitess/README.md) diff --git a/examples/storm/README.md b/examples/storm/README.md index bc8668bd38a..0867b35f185 100644 --- a/examples/storm/README.md +++ b/examples/storm/README.md @@ -1,172 +1 @@ -# Storm example - -Following this example, you will create a functional [Apache -Storm](http://storm.apache.org/) cluster using Kubernetes and -[Docker](http://docker.io). - -You will setup an [Apache ZooKeeper](http://zookeeper.apache.org/) -service, a Storm master service (a.k.a. Nimbus server), and a set of -Storm workers (a.k.a. supervisors). - -For the impatient expert, jump straight to the [tl;dr](#tldr) -section. - -### Sources - -Source is freely available at: -* Docker image - https://github.com/mattf/docker-storm -* Docker Trusted Build - https://registry.hub.docker.com/search?q=mattf/storm - -## Step Zero: Prerequisites - -This example assumes you have a Kubernetes cluster installed and -running, and that you have installed the ```kubectl``` command line -tool somewhere in your path. Please see the [getting -started](https://kubernetes.io/docs/getting-started-guides/) for installation -instructions for your platform. - -## Step One: Start your ZooKeeper service - -ZooKeeper is a distributed coordination [service](https://kubernetes.io/docs/user-guide/services.md) that Storm uses as a -bootstrap and for state storage. - -Use the [`examples/storm/zookeeper.json`](zookeeper.json) file to create a [pod](https://kubernetes.io/docs/user-guide/pods.md) running -the ZooKeeper service. 
- -```sh -$ kubectl create -f examples/storm/zookeeper.json -``` - -Then, use the [`examples/storm/zookeeper-service.json`](zookeeper-service.json) file to create a -logical service endpoint that Storm can use to access the ZooKeeper -pod. - -```sh -$ kubectl create -f examples/storm/zookeeper-service.json -``` - -You should make sure the ZooKeeper pod is Running and accessible -before proceeding. - -### Check to see if ZooKeeper is running - -```sh -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -zookeeper 1/1 Running 0 43s -``` - -### Check to see if ZooKeeper is accessible - -```console -$ kubectl get services -NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE -zookeeper 10.254.139.141 2181/TCP name=zookeeper 10m -kubernetes 10.0.0.2 443/TCP 1d - -$ echo ruok | nc 10.254.139.141 2181; echo -imok -``` - -## Step Two: Start your Nimbus service - -The Nimbus service is the master (or head) service for a Storm -cluster. It depends on a functional ZooKeeper service. - -Use the [`examples/storm/storm-nimbus.json`](storm-nimbus.json) file to create a pod running -the Nimbus service. - -```sh -$ kubectl create -f examples/storm/storm-nimbus.json -``` - -Then, use the [`examples/storm/storm-nimbus-service.json`](storm-nimbus-service.json) file to -create a logical service endpoint that Storm workers can use to access -the Nimbus pod. - -```sh -$ kubectl create -f examples/storm/storm-nimbus-service.json -``` - -Ensure that the Nimbus service is running and functional. - -### Check to see if Nimbus is running and accessible - -```sh -$ kubectl get services -NAME LABELS SELECTOR IP(S) PORT(S) -kubernetes component=apiserver,provider=kubernetes 10.254.0.2 443 -zookeeper name=zookeeper name=zookeeper 10.254.139.141 2181 -nimbus name=nimbus name=nimbus 10.254.115.208 6627 - -$ sudo docker run -it -w /opt/apache-storm mattf/storm-base sh -c '/configure.sh 10.254.139.141 10.254.115.208; ./bin/storm list' -... -No topologies running. 
-``` - -## Step Three: Start your Storm workers - -The Storm workers (or supervisors) do the heavy lifting in a Storm -cluster. They run your stream processing topologies and are managed by -the Nimbus service. - -The Storm workers need both the ZooKeeper and Nimbus services to be -running. - -Use the [`examples/storm/storm-worker-controller.json`](storm-worker-controller.json) file to create a -[replication controller](https://kubernetes.io/docs/user-guide/replication-controller.md) that manages the worker pods. - -```sh -$ kubectl create -f examples/storm/storm-worker-controller.json -``` - -### Check to see if the workers are running - -One way to check on the workers is to get information from the -ZooKeeper service about how many clients it has. - -```sh -$ echo stat | nc 10.254.139.141 2181; echo -Zookeeper version: 3.4.6--1, built on 10/23/2014 14:18 GMT -Clients: - /192.168.48.0:44187[0](queued=0,recved=1,sent=0) - /192.168.45.0:39568[1](queued=0,recved=14072,sent=14072) - /192.168.86.1:57591[1](queued=0,recved=34,sent=34) - /192.168.8.0:50375[1](queued=0,recved=34,sent=34) - -Latency min/avg/max: 0/2/2570 -Received: 23199 -Sent: 23198 -Connections: 4 -Outstanding: 0 -Zxid: 0xa39 -Mode: standalone -Node count: 13 -``` - -There should be one client from the Nimbus service and one per -worker. Ideally, you should get ```stat``` output from ZooKeeper -before and after creating the replication controller. - -(Pull requests welcome for alternative ways to validate the workers) - -## tl;dr - -```kubectl create -f zookeeper.json``` - -```kubectl create -f zookeeper-service.json``` - -Make sure the ZooKeeper Pod is running (use: ```kubectl get pods```). - -```kubectl create -f storm-nimbus.json``` - -```kubectl create -f storm-nimbus-service.json``` - -Make sure the Nimbus Pod is running. 
- -```kubectl create -f storm-worker-controller.json``` - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/storm/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/storm/README.md](https://github.com/kubernetes/examples/blob/master/staging/storm/README.md) diff --git a/examples/sysdig-cloud/README.md b/examples/sysdig-cloud/README.md index 14d431503b4..23678fe16ac 100644 --- a/examples/sysdig-cloud/README.md +++ b/examples/sysdig-cloud/README.md @@ -1,27 +1 @@ -[Sysdig Cloud](http://www.sysdig.com/) is a monitoring, alerting, and troubleshooting platform designed to natively support containerized and service-oriented applications. - -Sysdig Cloud comes with built-in, first class support for Kubernetes. In order to instrument your Kubernetes environment with Sysdig Cloud, you simply need to install the Sysdig Cloud agent container on each underlying host in your Kubernetes cluster. Sysdig Cloud will automatically begin monitoring all of your hosts, apps, pods, and services, and will also automatically connect to the Kubernetes API to pull relevant metadata about your environment. - -# Example Installation Files - -Provided here are two example sysdig.yaml files that can be used to automatically deploy the Sysdig Cloud agent container across a Kubernetes cluster. - -The recommended method is using daemon sets - minimum kubernetes version 1.1.1. - -If daemon sets are not available, then the replication controller method can be used (based on [this hack](https://stackoverflow.com/questions/33377054/how-to-require-one-pod-per-minion-kublet-when-configuring-a-replication-controll/33381862#33381862 )). 
- -# Latest Files - -See here for the latest maintained and updated versions of these example files: -https://github.com/draios/sysdig-cloud-scripts/tree/master/agent_deploy/kubernetes - -# Install instructions - -Please see the Sysdig Cloud support site for the latest documentation: -http://support.sysdigcloud.com/hc/en-us/sections/200959909 - - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/sysdig-cloud/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/sysdig-cloud/README.md](https://github.com/kubernetes/examples/blob/master/staging/sysdig-cloud/README.md) diff --git a/examples/volumes/aws_ebs/README.md b/examples/volumes/aws_ebs/README.md index 87ee0fc765d..3edf78fef45 100644 --- a/examples/volumes/aws_ebs/README.md +++ b/examples/volumes/aws_ebs/README.md @@ -1,37 +1 @@ -This is a simple web server pod which serves HTML from an AWS EBS -volume. - -If you did not use kube-up script, make sure that your minions have the following IAM permissions ([Amazon IAM Roles](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#create-iam-role-console)): - -```shell - ec2:AttachVolume - ec2:DetachVolume - ec2:DescribeInstances - ec2:DescribeVolumes -``` - -Create a volume in the same region as your node. 
- -Add your volume information in the pod description file aws-ebs-web.yaml then create the pod: - -```shell - $ kubectl create -f examples/volumes/aws_ebs/aws-ebs-web.yaml -``` - -Add some data to the volume if it is empty: - -```sh - $ echo "Hello World" >& /var/lib/kubelet/plugins/kubernetes.io/aws-ebs/mounts/aws/{Region}/{Volume ID}/index.html -``` - -You should now be able to query your web server: - -```sh - $ curl - $ Hello World -``` - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/aws_ebs/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/aws_ebs/README.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/aws_ebs/README.md) diff --git a/examples/volumes/azure_disk/README.md b/examples/volumes/azure_disk/README.md index 35edc7ca8d4..f495261e3dd 100644 --- a/examples/volumes/azure_disk/README.md +++ b/examples/volumes/azure_disk/README.md @@ -1,22 +1 @@ -# How to Use it? - -On Azure VM, create a Pod using the volume spec based on [azure](azure.yaml). - -In the pod, you need to provide the following information: - -- *diskName*: (required) the name of the VHD blob object. -- *diskURI*: (required) the URI of the vhd blob object. -- *cachingMode*: (optional) disk caching mode. Must be one of None, ReadOnly, or ReadWrite. Default is None. -- *fsType*: (optional) the filesystem type to mount. Default is ext4. -- *readOnly*: (optional) whether the filesystem is used as readOnly. Default is false. 
- - -Launch the Pod: - -```console - # kubectl create -f examples/volumes/azure_disk/azure.yaml -``` - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/azure_disk/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/azure_disk/README.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/azure_disk/README.md) diff --git a/examples/volumes/azure_file/README.md b/examples/volumes/azure_file/README.md index 165a581664a..007a6841bd1 100644 --- a/examples/volumes/azure_file/README.md +++ b/examples/volumes/azure_file/README.md @@ -1,35 +1 @@ -# How to Use it? - -Install *cifs-utils* on the Kubernetes host. For example, on Fedora based Linux - - # yum -y install cifs-utils - -Note, as explained in [Azure File Storage for Linux](https://azure.microsoft.com/en-us/documentation/articles/storage-how-to-use-files-linux/), the Linux hosts and the file share must be in the same Azure region. - -Obtain an Microsoft Azure storage account and create a [secret](secret/azure-secret.yaml) that contains the base64 encoded Azure Storage account name and key. In the secret file, base64-encode Azure Storage account name and pair it with name *azurestorageaccountname*, and base64-encode Azure Storage access key and pair it with name *azurestorageaccountkey*. - -Then create a Pod using the volume spec based on [azure](azure.yaml). - -In the pod, you need to provide the following information: - -- *secretName*: the name of the secret that contains both Azure storage account name and key. -- *shareName*: The share name to be used. -- *readOnly*: Whether the filesystem is used as readOnly. 
- -Create the secret: - -```console - # kubectl create -f examples/volumes/azure_file/secret/azure-secret.yaml -``` - -You should see the account name and key from `kubectl get secret` - -Then create the Pod: - -```console - # kubectl create -f examples/volumes/azure_file/azure.yaml -``` - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/azure_file/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/azure_file/README.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/azure_file/README.md) diff --git a/examples/volumes/cephfs/README.md b/examples/volumes/cephfs/README.md index ce6263cdae9..851e01c571e 100644 --- a/examples/volumes/cephfs/README.md +++ b/examples/volumes/cephfs/README.md @@ -1,38 +1 @@ -# How to Use it? - -Install Ceph on the Kubernetes host. For example, on Fedora 21 - - # yum -y install ceph - -If you don't have a Ceph cluster, you can set up a [containerized Ceph cluster](https://github.com/ceph/ceph-docker/tree/master/examples/kubernetes) - -Then get the keyring from the Ceph cluster and copy it to */etc/ceph/keyring*. - -Once you have installed Ceph and a Kubernetes cluster, you can create a pod based on my examples [cephfs.yaml](cephfs.yaml) and [cephfs-with-secret.yaml](cephfs-with-secret.yaml). In the pod yaml, you need to provide the following information. - -- *monitors*: Array of Ceph monitors. -- *path*: Used as the mounted root, rather than the full Ceph tree. If not provided, default */* is used. -- *user*: The RADOS user name. If not provided, default *admin* is used. -- *secretFile*: The path to the keyring file. If not provided, default */etc/ceph/user.secret* is used. -- *secretRef*: Reference to Ceph authentication secrets. If provided, *secret* overrides *secretFile*. -- *readOnly*: Whether the filesystem is used as readOnly. 
- - -Here are the commands: - -```console - # kubectl create -f examples/volumes/cephfs/cephfs.yaml - - # create a secret if you want to use Ceph secret instead of secret file - # kubectl create -f examples/volumes/cephfs/secret/ceph-secret.yaml - - # kubectl create -f examples/volumes/cephfs/cephfs-with-secret.yaml - # kubectl get pods -``` - - If you ssh to that machine, you can run `docker ps` to see the actual pod and `docker inspect` to see the volumes used by the container. - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/cephfs/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/cephfs/README.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/cephfs/README.md) diff --git a/examples/volumes/cinder/README.md b/examples/volumes/cinder/README.md index bbb3c470e80..4d008c58e10 100644 --- a/examples/volumes/cinder/README.md +++ b/examples/volumes/cinder/README.md @@ -1,27 +1 @@ -This is a simple web server pod which serves HTML from an Cinder volume. - -Create a volume in the same tenant and zone as your node. 
- -Add your volume information in the pod description file cinder-web.yaml then create the pod: - -```shell - $ kubectl create -f examples/volumes/cinder/cinder-web.yaml -``` - -Add some data to the volume if it is empty: - -```sh - $ echo "Hello World" >& /var/lib/kubelet/plugins/kubernetes.io/cinder/mounts/{Volume ID}/index.html -``` - -You should now be able to query your web server: - -```sh - $ curl - $ Hello World -``` - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/cinder/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/cinder/README.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/cinder/README.md) diff --git a/examples/volumes/fibre_channel/README.md b/examples/volumes/fibre_channel/README.md index 0e1bcf9b39e..308bae60264 100644 --- a/examples/volumes/fibre_channel/README.md +++ b/examples/volumes/fibre_channel/README.md @@ -1,73 +1 @@ -## Step 1. Setting up Fibre Channel Target - -On your FC SAN Zone manager, allocate and mask LUNs so Kubernetes hosts can access them. - -## Step 2. Creating the Pod with Fibre Channel persistent storage - -Once you have installed the Fibre Channel initiator and a new Kubernetes cluster, you can create a pod based on my example [fc.yaml](fc.yaml). In the pod JSON, you need to provide *targetWWNs* (array of Fibre Channel target's World Wide Names), *lun*, and the type of the filesystem that has been created on the lun, and *readOnly* boolean. 
- -Once your pod is created, run it on the Kubernetes master: - -```console -kubectl create -f ./your_new_pod.json -``` - -Here is my command and output: - -```console -# kubectl create -f examples/volumes/fibre_channel/fc.yaml -# kubectl get pods -NAME READY STATUS RESTARTS AGE -fcpd 2/2 Running 0 10m -``` - -On the Kubernetes host, I got these in mount output - -```console -#mount |grep /var/lib/kubelet/plugins/kubernetes.io -/dev/mapper/360a98000324669436c2b45666c567946 on /var/lib/kubelet/plugins/kubernetes.io/fc/500a0982991b8dc5-lun-2 type ext4 (ro,relatime,seclabel,stripe=16,data=ordered) -/dev/mapper/360a98000324669436c2b45666c567944 on /var/lib/kubelet/plugins/kubernetes.io/fc/500a0982991b8dc5-lun-1 type ext4 (rw,relatime,seclabel,stripe=16,data=ordered) -``` - -If you ssh to that machine, you can run `docker ps` to see the actual pod. - -```console -# docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -090ac457ddc2 kubernetes/pause "/pause" 12 minutes ago Up 12 minutes k8s_fcpd-rw.aae720ec_fcpd_default_4024318f-4121-11e5-a294-e839352ddd54_99eb5415 -5e2629cf3e7b kubernetes/pause "/pause" 12 minutes ago Up 12 minutes k8s_fcpd-ro.857720dc_fcpd_default_4024318f-4121-11e5-a294-e839352ddd54_c0175742 -2948683253f7 gcr.io/google_containers/pause:0.8.0 "/pause" 12 minutes ago Up 12 minutes k8s_POD.7be6d81d_fcpd_default_4024318f-4121-11e5-a294-e839352ddd54_8d9dd7bf -``` - -## Multipath - -To leverage multiple paths for block storage, it is important to perform the -multipath configuration on the host. 
-If your distribution does not provide `/etc/multipath.conf`, then you can -either use the following minimalistic one: - - defaults { - find_multipaths yes - user_friendly_names yes - } - -or create a new one by running: - - $ mpathconf --enable - -Finally you'll need to ensure to start or reload and enable multipath: - - $ systemctl enable multipathd.service - $ systemctl restart multipathd.service - -**Note:** Any change to `multipath.conf` or enabling multipath can lead to -inaccessible block devices, because they'll be claimed by multipath and -exposed as a device in /dev/mapper/*. - -Some additional informations about multipath can be found in the -[iSCSI documentation](../iscsi/README.md) - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/fibre_channel/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/fibre_channel/README.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/fibre_channel/README.md) diff --git a/examples/volumes/flexvolume/README.md b/examples/volumes/flexvolume/README.md index 8f88b690cf5..33ce85b526e 100644 --- a/examples/volumes/flexvolume/README.md +++ b/examples/volumes/flexvolume/README.md @@ -1 +1 @@ -Please refer to https://github.com/kubernetes/community/tree/master/contributors/devel/flexvolume.md for documentation. 
\ No newline at end of file +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/flexvolume/README.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/flexvolume/README.md) diff --git a/examples/volumes/flexvolume/nfs b/examples/volumes/flexvolume/nfs index 31254506327..4d0977cec87 100755 --- a/examples/volumes/flexvolume/nfs +++ b/examples/volumes/flexvolume/nfs @@ -48,7 +48,7 @@ domount() { SHARE=$(echo $2 | jq -r '.share') if [ $(ismounted) -eq 1 ] ; then - log "{\"status\": \"Success\"}" + log '{"status": "Success"}' exit 0 fi @@ -59,14 +59,14 @@ domount() { err "{ \"status\": \"Failure\", \"message\": \"Failed to mount ${NFS_SERVER}:${SHARE} at ${MNTPATH}\"}" exit 1 fi - log "{\"status\": \"Success\"}" + log '{"status": "Success"}' exit 0 } unmount() { MNTPATH=$1 if [ $(ismounted) -eq 0 ] ; then - log "{\"status\": \"Success\"}" + log '{"status": "Success"}' exit 0 fi @@ -76,14 +76,19 @@ unmount() { exit 1 fi - log "{\"status\": \"Success\"}" + log '{"status": "Success"}' exit 0 } op=$1 +if ! command -v jq >/dev/null 2>&1; then + err "{ \"status\": \"Failure\", \"message\": \"'jq' binary not found. Please install jq package before using this driver\"}" + exit 1 +fi + if [ "$op" = "init" ]; then - log "{\"status\": \"Success\", \"capabilities\": {\"attach\": false}}" + log '{"status": "Success", "capabilities": {"attach": false}}' exit 0 fi @@ -101,7 +106,7 @@ case "$op" in unmount $* ;; *) - log "{ \"status\": \"Not supported\" }" + log '{"status": "Not supported"}' exit 0 esac diff --git a/examples/volumes/flocker/README.md b/examples/volumes/flocker/README.md index 72ad47bc3bd..906446f4cc4 100644 --- a/examples/volumes/flocker/README.md +++ b/examples/volumes/flocker/README.md @@ -1,115 +1 @@ -## Using Flocker volumes - -[Flocker](https://clusterhq.com/flocker) is an open-source clustered container data volume manager. 
It provides management -and orchestration of data volumes backed by a variety of storage backends. - -This example provides information about how to set-up a Flocker installation and configure it in Kubernetes, as well as how to use the plugin to use Flocker datasets as volumes in Kubernetes. - -### Prerequisites - -A Flocker cluster is required to use Flocker with Kubernetes. A Flocker cluster comprises: - -- *Flocker Control Service*: provides a REST over HTTP API to modify the desired configuration of the cluster; -- *Flocker Dataset Agent(s)*: a convergence agent that modifies the cluster state to match the desired configuration; -- *Flocker Container Agent(s)*: a convergence agent that modifies the cluster state to match the desired configuration (unused in this configuration but still required in the cluster). - -The Flocker cluster can be installed on the same nodes you are using for Kubernetes. For instance, you can install the Flocker Control Service on the same node as Kubernetes Master and Flocker Dataset/Container Agents on every Kubernetes Slave node. - -It is recommended to follow [Installing Flocker](https://docs.clusterhq.com/en/latest/install/index.html) and the instructions below to set-up the Flocker cluster to be used with Kubernetes. - -#### Flocker Control Service - -The Flocker Control Service should be installed manually on a host. In the future, this may be deployed in pod(s) and exposed as a Kubernetes service. - -#### Flocker Agent(s) - -The Flocker Agents should be manually installed on *all* Kubernetes nodes. These agents are responsible for (de)attachment and (un)mounting and are therefore services that should be run with appropriate privileges on these hosts. - -In order for the plugin to connect to Flocker (via REST API), several environment variables must be specified on *all* Kubernetes nodes. 
This may be specified in an init script for the node's Kubelet service, for example, you could store the below environment variables in a file called `/etc/flocker/env` and place `EnvironmentFile=/etc/flocker/env` into `/etc/systemd/system/kubelet.service` or wherever the `kubelet.service` file lives. - -The environment variables that need to be set are: - -- `FLOCKER_CONTROL_SERVICE_HOST` should refer to the hostname of the Control Service -- `FLOCKER_CONTROL_SERVICE_PORT` should refer to the port of the Control Service (the API service defaults to 4523 but this must still be specified) - -The following environment variables should refer to keys and certificates on the host that are specific to that host. - -- `FLOCKER_CONTROL_SERVICE_CA_FILE` should refer to the full path to the cluster certificate file -- `FLOCKER_CONTROL_SERVICE_CLIENT_KEY_FILE` should refer to the full path to the [api key](https://docs.clusterhq.com/en/latest/config/generate-api-plugin.html) file for the API user -- `FLOCKER_CONTROL_SERVICE_CLIENT_CERT_FILE` should refer to the full path to the [api certificate](https://docs.clusterhq.com/en/latest/config/generate-api-plugin.html) file for the API user - -More details regarding cluster authentication can be found at the documentation: [Flocker Cluster Security & Authentication](https://docs.clusterhq.com/en/latest/concepts/security.html) and [Configuring Cluster Authentication](https://docs.clusterhq.com/en/latest/config/configuring-authentication.html). - -### Create a pod with a Flocker volume - -**Note**: A new dataset must first be provisioned using the Flocker tools or Docker CLI *(To use the Docker CLI, you need the [Flocker plugin for Docker](https://clusterhq.com/docker-plugin/) installed along with Docker 1.9+)*. 
 For example, using the [Volumes CLI](https://docs.clusterhq.com/en/latest/labs/volumes-cli.html), create a new dataset called 'my-flocker-vol' of size 10GB: - -```sh -flocker-volumes create -m name=my-flocker-vol -s 10G -n - -# -n or --node= Is the initial primary node for dataset (any unique -# prefix of node uuid, see flocker-volumes list-nodes) -``` - -The following *volume* spec from the [example pod](flocker-pod.yml) illustrates how to use this Flocker dataset as a volume. - -> Note, the [example pod](flocker-pod.yml) used here does not include a replication controller, therefore the POD will not be rescheduled upon failure. If you're looking for an example that does include a replication controller and service spec you can use [this example pod including a replication controller](flocker-pod-with-rc.yml) - -```yaml - volumes: - - name: www-root - flocker: - datasetName: my-flocker-vol -``` - -- **datasetName** is the unique name for the Flocker dataset and should match the *name* in the metadata. - -Use `kubectl` to create the pod. - -```sh -$ kubectl create -f examples/volumes/flocker/flocker-pod.yml -``` - -You should now verify that the pod is running and determine its IP address: - -```sh -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -flocker 1/1 Running 0 3m -$ kubectl get pods flocker -t '{{.status.hostIP}}{{"\n"}}' -172.31.25.62 -``` - -An `ls` of the `/flocker` directory on the host (identified by the IP as above) will show the mount point for the volume. - -```sh -$ ls /flocker -0cf8789f-00da-4da0-976a-b6b1dc831159 -``` - -You can also see the mountpoint by inspecting the docker container on that host. - -```sh -$ docker inspect -f "{{.Mounts}}" | grep flocker -...{ /flocker/0cf8789f-00da-4da0-976a-b6b1dc831159 /usr/share/nginx/html true} -``` - -Add an index.html inside this directory and use `curl` to see this HTML file served up by nginx. - -```sh -$ echo "

      Hello, World

      " | tee /flocker/0cf8789f-00da-4da0-976a-b6b1dc831159/index.html -$ curl ip - -``` - -### More Info - -Read more about the [Flocker Cluster Architecture](https://docs.clusterhq.com/en/latest/concepts/architecture.html) and learn more about Flocker by visiting the [Flocker Documentation](https://docs.clusterhq.com/). - -#### Video Demo - -To see a demo example of using Kubernetes and Flocker, visit [Flocker's blog post on High Availability with Kubernetes and Flocker](https://clusterhq.com/2015/12/22/ha-demo-kubernetes-flocker/) - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/flocker/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/flocker/README.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/flocker/README.md) diff --git a/examples/volumes/glusterfs/README.md b/examples/volumes/glusterfs/README.md index 027fcd8af78..ae7d23c5f1b 100644 --- a/examples/volumes/glusterfs/README.md +++ b/examples/volumes/glusterfs/README.md @@ -1,117 +1 @@ -## GlusterFS - -[GlusterFS](http://www.gluster.org) is an open source scale-out filesystem. These examples provide information about how to allow containers use GlusterFS volumes. - -There are couple of ways to use GlusterFS as a persistent data store in application pods. - -*) Static Provisioning of GlusterFS Volumes. -*) Dynamic Provisioning of GlusterFS Volumes. - -### Static Provisioning - -Static Provisioning of GlusterFS Volumes is analogues to creation of a PV ( Persistent Volume) resource by specifying the parameters in it. This -also need a working GlusterFS cluster/trusted pool available to carve out GlusterFS volumes. - -The example assumes that you have already set up a GlusterFS server cluster and have a working GlusterFS volume ready to use in the containers. 
- -#### Prerequisites - -* Set up a GlusterFS server cluster -* Create a GlusterFS volume -* If you are not using hyperkube, you may need to install the GlusterFS client package on the Kubernetes nodes ([Guide](http://gluster.readthedocs.io/en/latest/Administrator%20Guide/)) - -#### Create endpoints - -The first step is to create the GlusterFS endpoints definition in Kubernetes. Here is a snippet of [glusterfs-endpoints.json](glusterfs-endpoints.json): - -``` - "subsets": [ - { - "addresses": [{ "ip": "10.240.106.152" }], - "ports": [{ "port": 1 }] - }, - { - "addresses": [{ "ip": "10.240.79.157" }], - "ports": [{ "port": 1 }] - } - ] -``` - -The `subsets` field should be populated with the addresses of the nodes in the GlusterFS cluster. It is fine to provide any valid value (from 1 to 65535) in the `port` field. - -Create the endpoints: - -```sh -$ kubectl create -f examples/volumes/glusterfs/glusterfs-endpoints.json -``` - -You can verify that the endpoints are successfully created by running - -```sh -$ kubectl get endpoints -NAME ENDPOINTS -glusterfs-cluster 10.240.106.152:1,10.240.79.157:1 -``` - -We also need to create a service for these endpoints, so that they will persist. We will add this service without a selector to tell Kubernetes we want to add its endpoints manually. You can see [glusterfs-service.json](glusterfs-service.json) for details. - -Use this command to create the service: - -```sh -$ kubectl create -f examples/volumes/glusterfs/glusterfs-service.json -``` - - -#### Create a Pod - -The following *volume* spec in [glusterfs-pod.json](glusterfs-pod.json) illustrates a sample configuration: - -```json -"volumes": [ - { - "name": "glusterfsvol", - "glusterfs": { - "endpoints": "glusterfs-cluster", - "path": "kube_vol", - "readOnly": true - } - } -] -``` - -The parameters are explained as follows. 
*kubelet* is optimized to avoid mount storm, it will randomly pick one from the endpoints to mount. If this host is unresponsive, the next Gluster host in the endpoints is automatically selected. -- **path** is the Glusterfs volume name. -- **readOnly** is the boolean that sets the mountpoint readOnly or readWrite. - -Create a pod that has a container using Glusterfs volume, - -```sh -$ kubectl create -f examples/volumes/glusterfs/glusterfs-pod.json -``` - -You can verify that the pod is running: - -```sh -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -glusterfs 1/1 Running 0 3m -``` - -You may execute the command `mount` inside the container to see if the GlusterFS volume is mounted correctly: - -```sh -$ kubectl exec glusterfs -- mount | grep gluster -10.240.106.152:kube_vol on /mnt/glusterfs type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072) -``` - -You may also run `docker ps` on the host to see the actual container. - -### Dynamic Provisioning of GlusterFS Volumes: - -Dynamic Provisioning means provisioning of GlusterFS volumes based on a Storage class. Please refer [this guide](./../../persistent-volume-provisioning/README.md) -. - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/glusterfs/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/glusterfs/README.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/glusterfs/README.md) diff --git a/examples/volumes/iscsi/README.md b/examples/volumes/iscsi/README.md index 7f6090eff31..6b8ca9a854c 100644 --- a/examples/volumes/iscsi/README.md +++ b/examples/volumes/iscsi/README.md @@ -1,136 +1 @@ -## Introduction - -The Kubernetes iSCSI implementation can connect to iSCSI devices via open-iscsi and multipathd on Linux. 
-Currently supported features are - * Connecting to one portal - * Mounting a device directly or via multipathd - * Formatting and partitioning any new device connected - * CHAP authentication - -## Prerequisites - -This example expects there to be a working iSCSI target to connect to. -If there isn't one in place then it is possible to setup a software version on Linux by following these guides - - * [Setup a iSCSI target on Fedora](http://www.server-world.info/en/note?os=Fedora_21&p=iscsi) - * [Install the iSCSI initiator on Fedora](http://www.server-world.info/en/note?os=Fedora_21&p=iscsi&f=2) - * [Install multipathd for mpio support if required](http://www.linuxstories.eu/2014/07/how-to-setup-dm-multipath-on-rhel.html) - - -## Creating the pod with iSCSI persistent storage - -Once you have configured the iSCSI initiator, you can create a pod based on the example *iscsi.yaml*. In the pod YAML, you need to provide *targetPortal* (the iSCSI target's **IP** address and *port* if not the default port 3260), target's *iqn*, *lun*, and the type of the filesystem that has been created on the lun, and *readOnly* boolean. No initiator information is required. If you have more than one target portals for a single IQN, you can mention other portal IPs in *portals* field. - -If you want to use an iSCSI offload card or other open-iscsi transports besides tcp, setup an iSCSI interface and provide *iscsiInterface* in the pod YAML. The default name for an iscsi iface (open-iscsi parameter iface.iscsi\_ifacename) is in the format transport\_name.hwaddress when generated by iscsiadm. See [open-iscsi](http://www.open-iscsi.org/docs/README) or [openstack](http://docs.openstack.org/kilo/config-reference/content/iscsi-iface-config.html) for detailed configuration information. 
-
-**Note:** If you have followed the instructions in the links above you
-may have partitioned the device; the iSCSI volume plugin does not
-currently support partitions, so format the device as one partition or leave the device raw and Kubernetes will partition and format it on first mount.
-
-### CHAP Authentication
-
-To enable one-way or two-way CHAP authentication for discovery or session, follow these steps.
-
- * Set `chapAuthDiscovery` to `true` for discovery authentication.
- * Set `chapAuthSession` to `true` for session authentication.
- * Create a CHAP secret and set `secretRef` to reference the CHAP secret.
-
-
-An example can be found at [iscsi-chap.yaml](iscsi-chap.yaml)
-
-### CHAP Secret
-
-As illustrated in [chap-secret.yaml](chap-secret.yaml), the secret must have type `kubernetes.io/iscsi-chap` and consist of the following keys:
-
-```yaml
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: chap-secret
-type: "kubernetes.io/iscsi-chap"
-data:
-  discovery.sendtargets.auth.username:
-  discovery.sendtargets.auth.password:
-  discovery.sendtargets.auth.username_in:
-  discovery.sendtargets.auth.password_in:
-  node.session.auth.username:
-  node.session.auth.password:
-  node.session.auth.username_in:
-  node.session.auth.password_in:
-```
-
-These keys map to those used by the Open-iSCSI initiator.
Detailed documents on these keys can be found at [Open-iSCSI](https://github.com/open-iscsi/open-iscsi/blob/master/etc/iscsid.conf) - -#### Create CHAP secret before creating iSCSI volumes and Pods - -```console -# kubectl create -f examples/volumes/iscsi/chap-iscsi.yaml -``` - - - -Once the pod config is created, run it on the Kubernetes master: - -```console -kubectl create -f ./your_new_pod.yaml -``` - -Here is the example pod created and expected output: - -```console -# kubectl create -f examples/volumes/iscsi/iscsi.yaml -# kubectl get pods -NAME READY STATUS RESTARTS AGE -iscsipd 2/2 RUNNING 0 2m -``` - -On the Kubernetes node, verify the mount output - -For a non mpio device the output should look like the following - -```console -# mount |grep kub -/dev/sdb on /var/lib/kubelet/plugins/kubernetes.io/iscsi/10.0.2.15:3260-iqn.2001-04.com.example:storage.kube.sys1.xyz-lun-0 type ext4 (rw,relatime,data=ordered) -/dev/sdb on /var/lib/kubelet/pods/f527ca5b-6d87-11e5-aa7e-080027ff6387/volumes/kubernetes.io~iscsi/iscsipd-rw type ext4 (ro,relatime,data=ordered) -/dev/sdc on /var/lib/kubelet/plugins/kubernetes.io/iscsi/10.0.2.16:3260-iqn.2001-04.com.example:storage.kube.sys1.xyz-lun-0 type ext4 (rw,relatime,data=ordered) -/dev/sdc on /var/lib/kubelet/pods/f527ca5b-6d87-11e5-aa7e-080027ff6387/volumes/kubernetes.io~iscsi/iscsipd-rw type ext4 (rw,relatime,data=ordered) -/dev/sdd on /var/lib/kubelet/plugins/kubernetes.io/iscsi/10.0.2.17:3260-iqn.2001-04.com.example:storage.kube.sys1.xyz-lun-0 type ext4 (rw,relatime,data=ordered) -/dev/sdd on /var/lib/kubelet/pods/f527ca5b-6d87-11e5-aa7e-080027ff6387/volumes/kubernetes.io~iscsi/iscsipd-rw type ext4 (rw,relatime,data=ordered) -``` - -And for a node with mpio enabled the expected output would be similar to the following - -```console -# mount |grep kub -/dev/mapper/mpatha on /var/lib/kubelet/plugins/kubernetes.io/iscsi/10.0.2.15:3260-iqn.2001-04.com.example:storage.kube.sys1.xyz-lun-0 type ext4 (rw,relatime,data=ordered) 
-/dev/mapper/mpatha on /var/lib/kubelet/pods/f527ca5b-6d87-11e5-aa7e-080027ff6387/volumes/kubernetes.io~iscsi/iscsipd-ro type ext4 (ro,relatime,data=ordered)
-/dev/mapper/mpathb on /var/lib/kubelet/plugins/kubernetes.io/iscsi/10.0.2.16:3260-iqn.2001-04.com.example:storage.kube.sys1.xyz-lun-0 type ext4 (rw,relatime,data=ordered)
-/dev/mapper/mpathb on /var/lib/kubelet/pods/f527ca5b-6d87-11e5-aa7e-080027ff6387/volumes/kubernetes.io~iscsi/iscsipd-rw type ext4 (rw,relatime,data=ordered)
-/dev/mapper/mpathc on /var/lib/kubelet/plugins/kubernetes.io/iscsi/10.0.2.17:3260-iqn.2001-04.com.example:storage.kube.sys1.xyz-lun-0 type ext4 (rw,relatime,data=ordered)
-/dev/mapper/mpathb on /var/lib/kubelet/pods/f527ca5b-6d87-11e5-aa7e-080027ff6387/volumes/kubernetes.io~iscsi/iscsipd-rw type ext4 (rw,relatime,data=ordered)
-```
-
-
-If you ssh to that machine, you can run `docker ps` to see the actual pod.
-
-```console
-# docker ps
-CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
-3b8a772515d2 kubernetes/pause "/pause" 6 minutes ago Up 6 minutes k8s_iscsipd-rw.ed58ec4e_iscsipd_default_f527ca5b-6d87-11e5-aa7e-080027ff6387_d25592c5
-```
-
-Run *docker inspect* and verify the container mounted the host directory into its */mnt/iscsipd* directory.
- -```console -# docker inspect --format '{{ range .Mounts }}{{ if eq .Destination "/mnt/iscsipd" }}{{ .Source }}{{ end }}{{ end }}' f855336407f4 -/var/lib/kubelet/pods/f527ca5b-6d87-11e5-aa7e-080027ff6387/volumes/kubernetes.io~iscsi/iscsipd-ro - -# docker inspect --format '{{ range .Mounts }}{{ if eq .Destination "/mnt/iscsipd" }}{{ .Source }}{{ end }}{{ end }}' 3b8a772515d2 -/var/lib/kubelet/pods/f527ca5b-6d87-11e5-aa7e-080027ff6387/volumes/kubernetes.io~iscsi/iscsipd-rw -``` - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/iscsi/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/iscsi/README.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/iscsi/README.md) diff --git a/examples/volumes/nfs/README.md b/examples/volumes/nfs/README.md index f5a9d662087..acd56f937df 100644 --- a/examples/volumes/nfs/README.md +++ b/examples/volumes/nfs/README.md @@ -1,165 +1 @@ -# Outline - -This example describes how to create Web frontend server, an auto-provisioned persistent volume on GCE, and an NFS-backed persistent claim. - -Demonstrated Kubernetes Concepts: - -* [Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) to - define persistent disks (disk lifecycle not tied to the Pods). -* [Services](https://kubernetes.io/docs/concepts/services-networking/service/) to enable Pods to - locate one another. - -![alt text][nfs pv example] - -As illustrated above, two persistent volumes are used in this example: - -- Web frontend Pod uses a persistent volume based on NFS server, and -- NFS server uses an auto provisioned [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) from GCE PD or AWS EBS. - -Note, this example uses an NFS container that doesn't support NFSv4. 
- -[nfs pv example]: nfs-pv.png - - -## Quickstart - -```console -$ kubectl create -f examples/volumes/nfs/provisioner/nfs-server-gce-pv.yaml -$ kubectl create -f examples/volumes/nfs/nfs-server-rc.yaml -$ kubectl create -f examples/volumes/nfs/nfs-server-service.yaml -# get the cluster IP of the server using the following command -$ kubectl describe services nfs-server -# use the NFS server IP to update nfs-pv.yaml and execute the following -$ kubectl create -f examples/volumes/nfs/nfs-pv.yaml -$ kubectl create -f examples/volumes/nfs/nfs-pvc.yaml -# run a fake backend -$ kubectl create -f examples/volumes/nfs/nfs-busybox-rc.yaml -# get pod name from this command -$ kubectl get pod -l name=nfs-busybox -# use the pod name to check the test file -$ kubectl exec nfs-busybox-jdhf3 -- cat /mnt/index.html -``` - -## Example of NFS based persistent volume - -See [NFS Service and Replication Controller](nfs-web-rc.yaml) for a quick example of how to use an NFS -volume claim in a replication controller. It relies on the -[NFS persistent volume](nfs-pv.yaml) and -[NFS persistent volume claim](nfs-pvc.yaml) in this example as well. - -## Complete setup - -The example below shows how to export a NFS share from a single pod replication -controller and import it into two replication controllers. - -### NFS server part - -Define [the NFS Service and Replication Controller](nfs-server-rc.yaml) and -[NFS service](nfs-server-service.yaml): - -The NFS server exports an an auto-provisioned persistent volume backed by GCE PD: - -```console -$ kubectl create -f examples/volumes/nfs/provisioner/nfs-server-gce-pv.yaml -``` - -```console -$ kubectl create -f examples/volumes/nfs/nfs-server-rc.yaml -$ kubectl create -f examples/volumes/nfs/nfs-server-service.yaml -``` - -The directory contains dummy `index.html`. Wait until the pod is running -by checking `kubectl get pods -l role=nfs-server`. 
- -### Create the NFS based persistent volume claim - -The [NFS busybox controller](nfs-busybox-rc.yaml) uses a simple script to -generate data written to the NFS server we just started. First, you'll need to -find the cluster IP of the server: - -```console -$ kubectl describe services nfs-server -``` - -Replace the invalid IP in the [nfs PV](nfs-pv.yaml). (In the future, -we'll be able to tie these together using the service names, but for -now, you have to hardcode the IP.) - -Create the the [persistent volume](https://kubernetes.io/docs/user-guide/persistent-volumes.md) -and the persistent volume claim for your NFS server. The persistent volume and -claim gives us an indirection that allow multiple pods to refer to the NFS -server using a symbolic name rather than the hardcoded server address. - -```console -$ kubectl create -f examples/volumes/nfs/nfs-pv.yaml -$ kubectl create -f examples/volumes/nfs/nfs-pvc.yaml -``` - -## Setup the fake backend - -The [NFS busybox controller](nfs-busybox-rc.yaml) updates `index.html` on the -NFS server every 10 seconds. Let's start that now: - -```console -$ kubectl create -f examples/volumes/nfs/nfs-busybox-rc.yaml -``` - -Conveniently, it's also a `busybox` pod, so we can get an early check -that our mounts are working now. Find a busybox pod and exec: - -```console -$ kubectl get pod -l name=nfs-busybox -NAME READY STATUS RESTARTS AGE -nfs-busybox-jdhf3 1/1 Running 0 25m -nfs-busybox-w3s4t 1/1 Running 0 25m -$ kubectl exec nfs-busybox-jdhf3 -- cat /mnt/index.html -Thu Oct 22 19:20:18 UTC 2015 -nfs-busybox-w3s4t -``` - -You should see output similar to the above if everything is working well. If -it's not, make sure you changed the invalid IP in the [NFS PV](nfs-pv.yaml) file -and make sure the `describe services` command above had endpoints listed -(indicating the service was associated with a running pod). 
-
-### Setup the web server
-
-The [web server controller](nfs-web-rc.yaml) is another simple replication
-controller that demonstrates reading from the NFS share exported above as an NFS
-volume and running a simple web server on it.
-
-Define the pod:
-
-```console
-$ kubectl create -f examples/volumes/nfs/nfs-web-rc.yaml
-```
-
-This creates two pods, each of which serves the `index.html` from above. We can
-then use a simple service to front it:
-
-```console
-kubectl create -f examples/volumes/nfs/nfs-web-service.yaml
-```
-
-We can then use the busybox container we launched before to check that `nginx`
-is serving the data appropriately:
-
-```console
-$ kubectl get pod -l name=nfs-busybox
-NAME READY STATUS RESTARTS AGE
-nfs-busybox-jdhf3 1/1 Running 0 1h
-nfs-busybox-w3s4t 1/1 Running 0 1h
-$ kubectl get services nfs-web
-NAME LABELS SELECTOR IP(S) PORT(S)
-nfs-web role=web-frontend 10.0.68.37 80/TCP
-$ kubectl exec nfs-busybox-jdhf3 -- wget -qO- http://10.0.68.37
-Thu Oct 22 19:28:55 UTC 2015
-nfs-busybox-w3s4t
-```
-
-
-
-
-
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/nfs/README.md?pixel)]()
-
+This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/nfs/README.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/nfs/README.md)
diff --git a/examples/volumes/nfs/nfs-data/README.md b/examples/volumes/nfs/nfs-data/README.md
index df4b26d08ef..0b3a8b31988 100644
--- a/examples/volumes/nfs/nfs-data/README.md
+++ b/examples/volumes/nfs/nfs-data/README.md
@@ -1,13 +1 @@
-# NFS-exporter container with a file
-
-This container exports /exports with index.html in it via NFS. Based on
-../exports. Since some Linux kernels have issues running NFSv4 daemons in containers,
- -Available as `gcr.io/google-samples/nfs-server` - - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/nfs/nfs-data/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/nfs/nfs-data/README.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/nfs/nfs-data/README.md) diff --git a/examples/volumes/portworx/README.md b/examples/volumes/portworx/README.md index 36e87b4adfe..d1ab62468ea 100644 --- a/examples/volumes/portworx/README.md +++ b/examples/volumes/portworx/README.md @@ -1,370 +1 @@ -# Portworx Volume - - - [Portworx](#portworx) - - [Prerequisites](#prerequisites) - - [Examples](#examples) - - [Using Pre-provisioned Portworx Volumes](#pre-provisioned) - - [Running Pod](#running-pod) - - [Persistent Volumes](#persistent-volumes) - - [Using Dynamic Provisioning](#dynamic-provisioning) - - [Storage Class](#storage-class) - -## Portworx - -[Portworx](http://www.portworx.com) can be used as a storage provider for your Kubernetes cluster. Portworx pools your servers capacity and turns your servers -or cloud instances into converged, highly available compute and storage nodes - -## Prerequisites - -- A Portworx instance running on all of your Kubernetes nodes. For - more information on how you can install Portworx can be found [here](http://docs.portworx.com) - -## Examples - -The following examples assumes that you already have a running Kubernetes cluster with Portworx installed on all nodes. - -### Using Pre-provisioned Portworx Volumes - - Create a Volume using Portworx CLI. 
- On one of the Kubernetes nodes with Portworx installed run the following command - - ```shell - /opt/pwx/bin/pxctl volume create --size --fs - ``` - -#### Running Pods - - Create Pod which uses Portworx Volumes - - Example spec: - - ```yaml - apiVersion: v1 - kind: Pod - metadata: - name: test-portworx-volume-pod - spec: - containers: - - image: gcr.io/google_containers/test-webserver - name: test-container - volumeMounts: - - mountPath: /test-portworx-volume - name: test-volume - volumes: - - name: test-volume - # This Portworx volume must already exist. - portworxVolume: - volumeID: "" - fsType: "" - ``` - - [Download example](portworx-volume-pod.yaml?raw=true) - - Make sure to replace and in the above spec with - the ones that you used while creating the volume. - - Create the Pod. - - ``` bash - $ kubectl create -f examples/volumes/portworx/portworx-volume-pod.yaml - ``` - - Verify that pod is running: - - ```bash - $ kubectl.sh get pods - NAME READY STATUS RESTARTS AGE - test-portworx-volume-pod 1/1 Running 0 16s - ``` - -#### Persistent Volumes - - 1. Create Persistent Volume. - - Example spec: - - ```yaml - apiVersion: v1 - kind: PersistentVolume - metadata: - name: - spec: - capacity: - storage: Gi - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Retain - portworxVolume: - volumeID: "" - fsType: "" - ``` - - Make sure to replace , and in the above spec with - the ones that you used while creating the volume. - - [Download example](portworx-volume-pv.yaml?raw=true) - - Creating the persistent volume: - - ``` bash - $ kubectl create -f examples/volumes/portworx/portworx-volume-pv.yaml - ``` - - Verifying persistent volume is created: - - ``` bash - $ kubectl describe pv pv0001 - Name: pv0001 - Labels: - StorageClass: - Status: Available - Claim: - Reclaim Policy: Retain - Access Modes: RWO - Capacity: 2Gi - Message: - Source: - Type: PortworxVolume (a Portworx Persistent Volume resource) - VolumeID: pv0001 - FSType: ext4 - No events. 
- ``` - - 2. Create Persistent Volume Claim. - - Example spec: - - ```yaml - kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: pvc0001 - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: Gi - ``` - - [Download example](portworx-volume-pvc.yaml?raw=true) - - Creating the persistent volume claim: - - ``` bash - $ kubectl create -f examples/volumes/portworx/portworx-volume-pvc.yaml - ``` - - Verifying persistent volume claim is created: - - ``` bash - $ kubectl describe pvc pvc0001 - Name: pvc0001 - Namespace: default - Status: Bound - Volume: pv0001 - Labels: - Capacity: 2Gi - Access Modes: RWO - No events. - ``` - - 3. Create Pod which uses Persistent Volume Claim. - - See example: - - ```yaml - apiVersion: v1 - kind: Pod - metadata: - name: pvpod - spec: - containers: - - name: test-container - image: gcr.io/google_containers/test-webserver - volumeMounts: - - name: test-volume - mountPath: /test-portworx-volume - volumes: - - name: test-volume - persistentVolumeClaim: - claimName: pvc0001 - ``` - - [Download example](portworx-volume-pvcpod.yaml?raw=true) - - Creating the pod: - - ``` bash - $ kubectl create -f examples/volumes/portworx/portworx-volume-pvcpod.yaml - ``` - - Verifying pod is created: - - ``` bash - $ kubectl get pod pvpod - NAME READY STATUS RESTARTS AGE - pvpod 1/1 Running 0 48m - ``` - -### Using Dynamic Provisioning - -Using Dynamic Provisioning and Storage Classes you don't need to -create Portworx volumes out of band and they will be created automatically. - -#### Storage Class - - Using Storage Classes objects an admin can define the different classes of Portworx Volumes - that are offered in a cluster. 
Following are the different parameters that can be used to define a Portworx - Storage Class - - * `fs`: filesystem to be laid out: none|xfs|ext4 (default: `ext4`) - * `block_size`: block size in Kbytes (default: `32`) - * `repl`: replication factor [1..3] (default: `1`) - * `io_priority`: IO Priority: [high|medium|low] (default: `low`) - * `snap_interval`: snapshot interval in minutes, 0 disables snaps (default: `0`) - * `aggregation_level`: specifies the number of replication sets the volume can be aggregated from (default: `1`) - * `ephemeral`: ephemeral storage [true|false] (default `false`) - - - 1. Create Storage Class. - - See example: - - ```yaml - kind: StorageClass - apiVersion: storage.k8s.io/v1beta1 - metadata: - name: portworx-io-priority-high - provisioner: kubernetes.io/portworx-volume - parameters: - repl: "1" - snap_interval: "70" - io_priority: "high" - ``` - - [Download example](portworx-volume-sc-high.yaml?raw=true) - - Creating the storageclass: - - ``` bash - $ kubectl create -f examples/volumes/portworx/portworx-volume-sc-high.yaml - ``` - - Verifying storage class is created: - - ``` bash - $ kubectl describe storageclass portworx-io-priority-high - Name: portworx-io-priority-high - IsDefaultClass: No - Annotations: - Provisioner: kubernetes.io/portworx-volume - Parameters: io_priority=high,repl=1,snapshot_interval=70 - No events. - ``` - - 2. Create Persistent Volume Claim. 
- - See example: - - ```yaml - kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: pvcsc001 - annotations: - volume.beta.kubernetes.io/storage-class: portworx-io-priority-high - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi - ``` - - [Download example](portworx-volume-pvcsc.yaml?raw=true) - - Creating the persistent volume claim: - - ``` bash - $ kubectl create -f examples/volumes/portworx/portworx-volume-pvcsc.yaml - ``` - - Verifying persistent volume claim is created: - - ``` bash - $ kubectl describe pvc pvcsc001 - Name: pvcsc001 - Namespace: default - StorageClass: portworx-io-priority-high - Status: Bound - Volume: pvc-e5578707-c626-11e6-baf6-08002729a32b - Labels: - Capacity: 2Gi - Access Modes: RWO - No Events - ``` - - Persistent Volume is automatically created and is bounded to this pvc. - - Verifying persistent volume claim is created: - - ``` bash - $ kubectl describe pv pvc-e5578707-c626-11e6-baf6-08002729a32b - Name: pvc-e5578707-c626-11e6-baf6-08002729a32b - Labels: - StorageClass: portworx-io-priority-high - Status: Bound - Claim: default/pvcsc001 - Reclaim Policy: Delete - Access Modes: RWO - Capacity: 2Gi - Message: - Source: - Type: PortworxVolume (a Portworx Persistent Volume resource) - VolumeID: 374093969022973811 - No events. - ``` - - 3. Create Pod which uses Persistent Volume Claim with storage class. 
- - See example: - - ```yaml - apiVersion: v1 - kind: Pod - metadata: - name: pvpod - spec: - containers: - - name: test-container - image: gcr.io/google_containers/test-webserver - volumeMounts: - - name: test-volume - mountPath: /test-portworx-volume - volumes: - - name: test-volume - persistentVolumeClaim: - claimName: pvcsc001 - ``` - - [Download example](portworx-volume-pvcscpod.yaml?raw=true) - - Creating the pod: - - ``` bash - $ kubectl create -f examples/volumes/portworx/portworx-volume-pvcscpod.yaml - ``` - - Verifying pod is created: - - ``` bash - $ kubectl get pod pvpod - NAME READY STATUS RESTARTS AGE - pvpod 1/1 Running 0 48m - ``` - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/portworx/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/portworx/README.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/portworx/README.md) diff --git a/examples/volumes/quobyte/Readme.md b/examples/volumes/quobyte/Readme.md index 4199a232a85..fe41c8e595e 100644 --- a/examples/volumes/quobyte/Readme.md +++ b/examples/volumes/quobyte/Readme.md @@ -1,98 +1 @@ - - -- [Quobyte Volume](#quobyte-volume) - - [Quobyte](#quobyte) - - [Prerequisites](#prerequisites) - - [Fixed user Mounts](#fixed-user-mounts) - - [Creating a pod](#creating-a-pod) - - - -# Quobyte Volume - -## Quobyte - -[Quobyte](http://www.quobyte.com) is software that turns commodity servers into a reliable and highly automated multi-data center file system. - -The example assumes that you already have a running Kubernetes cluster and you already have setup Quobyte-Client (1.3+) on each Kubernetes node. - -### Prerequisites - -- Running Quobyte storage cluster -- Quobyte client (1.3+) installed on the Kubernetes nodes more information how you can install Quobyte on your Kubernetes nodes, can be found in the [documentation](https://support.quobyte.com) of Quobyte. 
-- To get access to Quobyte and the documentation please [contact us](http://www.quobyte.com/get-quobyte) -- Already created Quobyte Volume -- Added the line `allow-usermapping-in-volumename` in `/etc/quobyte/client.cfg` to allow the fixed user mounts - -### Fixed user Mounts - -Quobyte supports since 1.3 fixed user mounts. The fixed-user mounts simply allow to mount all Quobyte Volumes inside one directory and use them as different users. All access to the Quobyte Volume will be rewritten to the specified user and group – both are optional, independent of the user inside the container. You can read more about it [here](https://blog.inovex.de/docker-plugins) under the section "Quobyte Mount and Docker — what’s special" - -## Creating a pod - -See example: - - - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: quobyte -spec: - containers: - - name: quobyte - image: kubernetes/pause - volumeMounts: - - mountPath: /mnt - name: quobytevolume - volumes: - - name: quobytevolume - quobyte: - registry: registry:7861 - volume: testVolume - readOnly: false - user: root - group: root -``` - -[Download example](quobyte-pod.yaml?raw=true) - - -Parameters: -* **registry** Quobyte registry to use to mount the volume. You can specify the registry as : pair or if you want to specify multiple registries you just have to put a comma between them e.q. :,:,:. The host can be an IP address or if you have a working DNS you can also provide the DNS names. -* **volume** volume represents a Quobyte volume which must be created before usage. -* **readOnly** is the boolean that sets the mountpoint readOnly or readWrite. -* **user** maps all access to this user. Default is `root`. -* **group** maps all access to this group. Default is `nfsnobody`. 
- -Creating the pod: - -```bash -$ kubectl create -f examples/volumes/quobyte/quobyte-pod.yaml -``` - -Verify that the pod is running: - -```bash -$ kubectl get pods quobyte -NAME READY STATUS RESTARTS AGE -quobyte 1/1 Running 0 48m - -$ kubectl get pods quobyte --template '{{.status.hostIP}}{{"\n"}}' -10.245.1.3 -``` - -SSH onto the Machine and validate that quobyte is mounted: - -```bash -$ mount | grep quobyte -quobyte@10.239.10.21:7861/ on /var/lib/kubelet/plugins/kubernetes.io~quobyte type fuse (rw,nosuid,nodev,noatime,user_id=0,group_id=0,default_permissions,allow_other) - -$ docker inspect --format '{{ range .Mounts }}{{ if eq .Destination "/mnt"}}{{ .Source }}{{ end }}{{ end }}' 55ab97593cd3 -/var/lib/kubelet/plugins/kubernetes.io~quobyte/root#root@testVolume -``` - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/quobyte/Readme.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/quobyte/Readme.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/quobyte/Readme.md) diff --git a/examples/volumes/rbd/README.md b/examples/volumes/rbd/README.md index bf4f9852eae..46b19d8d360 100644 --- a/examples/volumes/rbd/README.md +++ b/examples/volumes/rbd/README.md @@ -1,59 +1 @@ -# How to Use it? - -Install Ceph on the Kubernetes host. For example, on Fedora 21 - - # yum -y install ceph-common - -If you don't have a Ceph cluster, you can set up a [containerized Ceph cluster](https://github.com/ceph/ceph-docker) - -Then get the keyring from the Ceph cluster and copy it to */etc/ceph/keyring*. - -Once you have installed Ceph and new Kubernetes, you can create a pod based on my examples [rbd.json](rbd.json) [rbd-with-secret.json](rbd-with-secret.json). In the pod JSON, you need to provide the following information. - -- *monitors*: Ceph monitors. -- *pool*: The name of the RADOS pool, if not provided, default *rbd* pool is used. 
-- *image*: The image name that rbd has created. -- *user*: The RADOS user name. If not provided, default *admin* is used. -- *keyring*: The path to the keyring file. If not provided, default */etc/ceph/keyring* is used. -- *secretName*: The name of the authentication secrets. If provided, *secretName* overrides *keyring*. Note, see below about how to create a secret. -- *fsType*: The filesystem type (ext4, xfs, etc) that formatted on the device. -- *readOnly*: Whether the filesystem is used as readOnly. - -# Use Ceph Authentication Secret - -If Ceph authentication secret is provided, the secret should be first be *base64 encoded*, then encoded string is placed in a secret yaml. For example, getting Ceph user `kube`'s base64 encoded secret can use the following command: - -```console - # grep key /etc/ceph/ceph.client.kube.keyring |awk '{printf "%s", $NF}'|base64 -QVFBTWdYaFZ3QkNlRGhBQTlubFBhRnlmVVNhdEdENGRyRldEdlE9PQ== -``` - -An example yaml is provided [here](secret/ceph-secret.yaml). Then post the secret through ```kubectl``` in the following command. - -```console - # kubectl create -f examples/volumes/rbd/secret/ceph-secret.yaml -``` - -# Get started - -Here are my commands: - -```console - # kubectl create -f examples/volumes/rbd/rbd.json - # kubectl get pods -``` - -On the Kubernetes host, I got these in mount output - -```console - #mount |grep kub - /dev/rbd0 on /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/kube-image-foo type ext4 (ro,relatime,stripe=4096,data=ordered) - /dev/rbd0 on /var/lib/kubelet/pods/ec2166b4-de07-11e4-aaf5-d4bed9b39058/volumes/kubernetes.io~rbd/rbdpd type ext4 (ro,relatime,stripe=4096,data=ordered) -``` - - If you ssh to that machine, you can run `docker ps` to see the actual pod and `docker inspect` to see the volumes used by the container. 
- - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/rbd/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/rbd/README.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/rbd/README.md) diff --git a/examples/volumes/scaleio/README.md b/examples/volumes/scaleio/README.md index 65bec664117..fb7f9a6fac7 100644 --- a/examples/volumes/scaleio/README.md +++ b/examples/volumes/scaleio/README.md @@ -1,302 +1 @@ - - - - -WARNING -WARNING -WARNING -WARNING -WARNING - -

      PLEASE NOTE: This document applies to the HEAD of the source tree

      - -If you are using a released version of Kubernetes, you should -refer to the docs that go with that version. - -Documentation for other releases can be found at -[releases.k8s.io](http://releases.k8s.io). - --- - - - - - -# Dell EMC ScaleIO Volume Plugin for Kubernetes - -This document shows how to configure Kubernetes resources to consume storage from volumes hosted on ScaleIO cluster. - -## Pre-Requisites - -* Kubernetes ver 1.6 or later -* ScaleIO ver 2.0 or later -* A ScaleIO cluster with an API gateway -* ScaleIO SDC binary installed/configured on each Kubernetes node that will consume storage - -## ScaleIO Setup - -This document assumes you are familiar with ScaleIO and have a cluster ready to go. If you are *not familiar* with ScaleIO, please review *Learn how to setup a 3-node* [ScaleIO cluster on Vagrant](https://github.com/codedellemc/labs/tree/master/setup-scaleio-vagrant) and see *General instructions on* [setting up ScaleIO](https://www.emc.com/products-solutions/trial-software-download/scaleio.htm) - -For this demonstration, ensure the following: - - - The ScaleIO `SDC` component is installed and properly configured on all Kubernetes nodes where deployed pods will consume ScaleIO-backed volumes. - - You have a configured ScaleIO gateway that is accessible from the Kubernetes nodes. - -## Deploy Kubernetes Secret for ScaleIO - -The ScaleIO plugin uses a Kubernetes Secret object to store the `username` and `password` credentials. -Kuberenetes requires the secret values to be base64-encoded to simply obfuscate (not encrypt) the clear text as shown below. - -``` -$> echo -n "siouser" | base64 -c2lvdXNlcg== -$> echo -n "sc@l3I0" | base64 -c2NAbDNJMA== -``` -The previous will generate `base64-encoded` values for the username and password. -Remember to generate the credentials for your own environment and copy them in a secret file similar to the following. 
- -File: [secret.yaml](secret.yaml) - -``` -apiVersion: v1 -kind: Secret -metadata: - name: sio-secret -type: kubernetes.io/scaleio -data: - username: c2lvdXNlcg== - password: c2NAbDNJMA== -``` - -Notice the name of the secret specified above as `sio-secret`. It will be referred in other YAML files. Next, deploy the secret. - -``` -$ kubectl create -f ./examples/volumes/scaleio/secret.yaml -``` - -## Deploying Pods with Persistent Volumes - -The example presented in this section shows how the ScaleIO volume plugin can automatically attach, format, and mount an existing ScaleIO volume for pod. -The Kubernetes ScaleIO volume spec supports the following attributes: - -| Attribute | Description | -|-----------|-------------| -| gateway | address to a ScaleIO API gateway (required)| -| system | the name of the ScaleIO system (required)| -| protectionDomain| the name of the ScaleIO protection domain (default `default`)| -| storagePool| the name of the volume storage pool (default `default`)| -| storageMode| the storage provision mode: `ThinProvisionned` (default) or `ThickProvisionned`| -| volumeName| the name of an existing volume in ScaleIO (required)| -| secretRef:name| reference to a configured Secret object (required, see Secret earlier)| -| readOnly| specifies the access mode to the mounted volume (default `false`)| -| fsType| the file system to use for the volume (default `ext4`)| - -### Create Volume - -Static persistent volumes require that the volume, to be consumed by the pod, be already created in ScaleIO. You can use your ScaleIO tooling to create a new volume or use the name of a volume that already exists in ScaleIO. For this demo, we assume there's a volume named `vol-0`. If you want to use an existing volume, ensure its name is reflected properly in the `volumeName` attribute below. - -### Deploy Pod YAML - -Create a pod YAML file that declares the volume (above) to be used. 
- -File: [pod.yaml](pod.yaml) - -``` -apiVersion: v1 -kind: Pod -metadata: - name: pod-0 -spec: - containers: - - image: gcr.io/google_containers/test-webserver - name: pod-0 - volumeMounts: - - mountPath: /test-pd - name: vol-0 - volumes: - - name: vol-0 - scaleIO: - gateway: https://localhost:443/api - system: scaleio - volumeName: vol-0 - secretRef: - name: sio-secret - fsType: xfs -``` -Notice the followings in the previous YAML: - -- Update the `gatewway` to point to your ScaleIO gateway endpoint. -- The `volumeName` attribute refers to the name of an existing volume in ScaleIO. -- The `secretRef:name` attribute references the name of the secret object deployed earlier. - -Next, deploy the pod. - -``` -$> kubectl create -f examples/volumes/scaleio/pod.yaml -``` -You can verify the pod: -``` -$> kubectl get pod -NAME READY STATUS RESTARTS AGE -pod-0 1/1 Running 0 33s -``` -Or for more detail, use -``` -kubectl describe pod pod-0 -``` -You can see the attached/mapped volume on the node: -``` -$> lsblk -NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT -... -scinia 252:0 0 8G 0 disk /var/lib/kubelet/pods/135986c7-dcb7-11e6-9fbf-080027c990a7/volumes/kubernetes.io~scaleio/vol-0 -``` - -## StorageClass and Dynamic Provisioning - -In the example in this section, we will see how the ScaleIO volume plugin can automatically provision described in a `StorageClass`. 
-The ScaleIO volume plugin is a dynamic provisioner identified as `kubernetes.io/scaleio` and supports the following parameters: - -| Parameter | Description | -|-----------|-------------| -| gateway | address to a ScaleIO API gateway (required)| -| system | the name of the ScaleIO system (required)| -| protectionDomain| the name of the ScaleIO protection domain (default `default`)| -| storagePool| the name of the volume storage pool (default `default`)| -| storageMode| the storage provision mode: `ThinProvisionned` (default) or `ThickProvisionned`| -| secretRef| reference to the name of a configured Secret object (required)| -| readOnly| specifies the access mode to the mounted volume (default `false`)| -| fsType| the file system to use for the volume (default `ext4`)| - - -### ScaleIO StorageClass - -Define a new `StorageClass` as shown in the following YAML. - -File [sc.yaml](sc.yaml) - -``` -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: sio-small -provisioner: kubernetes.io/scaleio -parameters: - gateway: https://localhost:443/api - system: scaleio - protectionDomain: default - secretRef: sio-secret - fsType: xfs -``` -Note the followings: - -- The `name` attribute is set to sio-small . It will be referenced later. -- The `secretRef` attribute matches the name of the Secret object created earlier. - -Next, deploy the storage class file. - -``` -$> kubectl create -f examples/volumes/scaleio/sc.yaml - -$> kubectl get sc -NAME TYPE -sio-small kubernetes.io/scaleio -``` - -### PVC for the StorageClass - -The next step is to define/deploy a `PersistentVolumeClaim` that will use the StorageClass. 
- -File [sc-pvc.yaml](sc-pvc.yaml) - -``` -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: pvc-sio-small - annotations: - volume.beta.kubernetes.io/storage-class: sio-small -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 10Gi -``` - -Note the `annotations:` entry which specifies annotation `volume.beta.kubernetes.io/storage-class: sio-small` which references the name of the storage class defined earlier. - -Next, we deploy PVC file for the storage class. This step will cause the Kubernetes ScaleIO plugin to create the volume in the storage system. -``` -$> kubectl create -f examples/volumes/scaleio/sc-pvc.yaml -``` -You verify that a new volume created in the ScaleIO dashboard. You can also verify the newly created volume as follows. -``` - kubectl get pvc -NAME STATUS VOLUME CAPACITY ACCESSMODES AGE -pvc-sio-small Bound pvc-5fc78518-dcae-11e6-a263-080027c990a7 10Gi RWO 1h -``` - -###Pod for PVC and SC -At this point, the volume is created (by the claim) in the storage system. To use it, we must define a pod that references the volume as done in this YAML. - -File [pod-sc-pvc.yaml](pod-sc-pvc.yaml) - -``` -kind: Pod -apiVersion: v1 -metadata: - name: pod-sio-small -spec: - containers: - - name: pod-sio-small-container - image: gcr.io/google_containers/test-webserver - volumeMounts: - - mountPath: /test - name: test-data - volumes: - - name: test-data - persistentVolumeClaim: - claimName: pvc-sio-small -``` - -Notice that the `claimName:` attribute refers to the name of the PVC defined and deployed earlier. Next, let us deploy the file. - -``` -$> kubectl create -f examples/volumes/scaleio/pod-sc-pvc.yaml -``` -We can now verify that the new pod is deployed OK. -``` -kubectl get pod -NAME READY STATUS RESTARTS AGE -pod-0 1/1 Running 0 23m -pod-sio-small 1/1 Running 0 5s -``` -You can use the ScaleIO dashboard to verify that the new volume has one attachment. 
You can verify the volume information for the pod: -``` -$> kubectl describe pod pod-sio-small -... -Volumes: - test-data: - Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace) - ClaimName: pvc-sio-small - ReadOnly: false -... -``` -Lastly, you can see the volume's attachment on the Kubernetes node: -``` -$> lsblk -... -scinia 252:0 0 8G 0 disk /var/lib/kubelet/pods/135986c7-dcb7-11e6-9fbf-080027c990a7/volumes/kubernetes.io~scaleio/vol-0 -scinib 252:16 0 16G 0 disk /var/lib/kubelet/pods/62db442e-dcba-11e6-9fbf-080027c990a7/volumes/kubernetes.io~scaleio/sio-5fc9154ddcae11e68db708002 - -``` - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/scaleio/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/scaleio/README.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/scaleio/README.md) diff --git a/examples/volumes/storageos/README.md b/examples/volumes/storageos/README.md index 9aba1435e0b..c2d6f74dbb5 100644 --- a/examples/volumes/storageos/README.md +++ b/examples/volumes/storageos/README.md @@ -1,475 +1 @@ -# StorageOS Volume - - - [StorageOS](#storageos) - - [Prerequisites](#prerequisites) - - [Examples](#examples) - - [Pre-provisioned Volumes](#pre-provisioned) - - [Pod](#pod) - - [Persistent Volumes](#persistent-volumes) - - [Dynamic Provisioning](#dynamic-provisioning) - - [Storage Class](#storage-class) - - [API Configuration](#api-configuration) - -## StorageOS - -[StorageOS](https://www.storageos.com) can be used as a storage provider for your Kubernetes cluster. StorageOS runs as a container within your Kubernetes environment, making local storage accessible from any node within the Kubernetes cluster. Data can be replicated to protect against node failure. - -At its core, StorageOS provides block storage. You may choose the filesystem type to install to make devices usable from within containers. 
- -## Prerequisites - -The StorageOS container must be running on each Kubernetes node that wants to contribute storage or that wants to consume storage. For more information on how you can run StorageOS, consult the [StorageOS documentation](https://docs.storageos.com). - -## API Configuration - -The StorageOS provider has been pre-configured to use the StorageOS API defaults, and no additional configuration is required for testing. If you have changed the API port, or have removed the default account or changed its password (recommended), you must specify the new settings. This is done using Kubernetes [Secrets](../../../docs/user-guide/secrets/). - -API configuration is set by using Kubernetes secrets. The configuration secret supports the following parameters: - -* `apiAddress`: The address of the StorageOS API. This is optional and defaults to `tcp://localhost:5705`, which should be correct if the StorageOS container is running using the default settings. -* `apiUsername`: The username to authenticate to the StorageOS API with. -* `apiPassword`: The password to authenticate to the StorageOS API with. -* `apiVersion`: Optional, string value defaulting to `1`. Only set this if requested in StorageOS documentation. - -Mutiple credentials can be used by creating different secrets. - -For Persistent Volumes, secrets must be created in the Pod namespace. Specify the secret name using the `secretName` parameter when attaching existing volumes in Pods or creating new persistent volumes. - -For dynamically provisioned volumes using storage classes, the secret can be created in any namespace. Note that you would want this to be an admin-controlled namespace with restricted access to users. Specify the secret namespace as parameter `adminSecretNamespace` and name as parameter `adminSecretName` in storage classes. 
- -Example spec: - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: storageos-secret -type: "kubernetes.io/storageos" -data: - apiAddress: dGNwOi8vMTI3LjAuMC4xOjU3MDU= - apiUsername: c3RvcmFnZW9z - apiPassword: c3RvcmFnZW9z -``` - -Values for `apiAddress`, `apiUsername` and `apiPassword` can be generated with: - -```bash -$ echo -n "tcp://127.0.0.1:5705" | base64 -dGNwOi8vMTI3LjAuMC4xOjU3MDU= -``` - -Create the secret: - -```bash -$ kubectl create -f storageos-secret.yaml -secret "storageos-secret" created -``` - -Verify the secret: - -```bash -$ kubectl describe secret storageos-secret -Name: storageos-secret -Namespace: default -Labels: -Annotations: - -Type: kubernetes.io/storageos - -Data -==== -apiAddress: 20 bytes -apiPassword: 8 bytes -apiUsername: 8 bytes - -``` -## Examples - -These examples assume you have a running Kubernetes cluster with the StorageOS container running on each node, and that an API configuration secret called `storageos-secret` has been created in the `default` namespace. - -### Pre-provisioned Volumes - -#### Pod - -Pods can be created that access volumes directly. - -1. Create a volume using the StorageOS UI, CLI or API. Consult the [StorageOS documentation](https://docs.storageos.com) for details. -1. Create a pod that refers to the new volume. In this case the volume is named `redis-vol01`. - - Example spec: - - ```yaml - apiVersion: v1 - kind: Pod - metadata: - labels: - name: redis - role: master - name: test-storageos-redis - spec: - containers: - - name: master - image: kubernetes/redis:v1 - env: - - name: MASTER - value: "true" - ports: - - containerPort: 6379 - resources: - limits: - cpu: "0.1" - volumeMounts: - - mountPath: /redis-master-data - name: redis-data - volumes: - - name: redis-data - storageos: - # This volume must already exist within StorageOS - volumeName: redis-vol01 - # volumeNamespace is optional, and specifies the volume scope within - # StorageOS. 
If no namespace is provided, it will use the namespace - # of the pod. Set to `default` or leave blank if you are not using - # namespaces. - #volumeNamespace: test-storageos - # The filesystem type to format the volume with, if required. - fsType: ext4 - # The secret name for API credentials - secretName: storageos-secret - ``` - - [Download example](storageos-pod.yaml?raw=true) - - Create the pod: - - ```bash - $ kubectl create -f examples/volumes/storageos/storageos-pod.yaml - ``` - - Verify that the pod is running: - - ```bash - $ kubectl get pods test-storageos-redis - NAME READY STATUS RESTARTS AGE - test-storageos-redis 1/1 Running 0 30m - ``` - -### Persistent Volumes - -1. Create a volume using the StorageOS UI, CLI or API. Consult the [StorageOS documentation](https://docs.storageos.com) for details. -1. Create the persistent volume `redis-vol01`. - - Example spec: - - ```yaml - apiVersion: v1 - kind: PersistentVolume - metadata: - name: pv0001 - spec: - capacity: - storage: 5Gi - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Delete - storageos: - # This volume must already exist within StorageOS - volumeName: pv0001 - # volumeNamespace is optional, and specifies the volume scope within - # StorageOS. Set to `default` or leave blank if you are not using - # namespaces. - #volumeNamespace: default - # The filesystem type to create on the volume, if required. 
- fsType: ext4 - # The secret name for API credentials - secretName: storageos-secret - ``` - - [Download example](storageos-pv.yaml?raw=true) - - Create the persistent volume: - - ```bash - $ kubectl create -f examples/volumes/storageos/storageos-pv.yaml - ``` - - Verify that the pv has been created: - - ```bash - $ kubectl describe pv pv0001 - Name: pv0001 - Labels: - Annotations: - StorageClass: fast - Status: Available - Claim: - Reclaim Policy: Delete - Access Modes: RWO - Capacity: 5Gi - Message: - Source: - Type: StorageOS (a StorageOS Persistent Disk resource) - VolumeName: pv0001 - VolumeNamespace: - FSType: ext4 - ReadOnly: false - Events: - ``` - -1. Create persistent volume claim - - Example spec: - - ```yaml - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: pvc0001 - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 5Gi - storageClassName: fast - ``` - - [Download example](storageos-pvc.yaml?raw=true) - - Create the persistent volume claim: - - ```bash - $ kubectl create -f examples/volumes/storageos/storageos-pvc.yaml - ``` - - Verify that the pvc has been created: - - ```bash - $ kubectl describe pvc pvc0001 - Name: pvc0001 - Namespace: default - StorageClass: fast - Status: Bound - Volume: pv0001 - Labels: - Capacity: 5Gi - Access Modes: RWO - No events. - ``` - -1. 
Create pod which uses the persistent volume claim - - Example spec: - - ```yaml - apiVersion: v1 - kind: Pod - metadata: - labels: - name: redis - role: master - name: test-storageos-redis-pvc - spec: - containers: - - name: master - image: kubernetes/redis:v1 - env: - - name: MASTER - value: "true" - ports: - - containerPort: 6379 - resources: - limits: - cpu: "0.1" - volumeMounts: - - mountPath: /redis-master-data - name: redis-data - volumes: - - name: redis-data - persistentVolumeClaim: - claimName: pvc0001 - ``` - - [Download example](storageos-pvcpod.yaml?raw=true) - - Create the pod: - - ```bash - $ kubectl create -f examples/volumes/storageos/storageos-pvcpod.yaml - ``` - - Verify that the pod has been created: - - ```bash - $ kubectl get pods - NAME READY STATUS RESTARTS AGE - test-storageos-redis-pvc 1/1 Running 0 40s - ``` - -### Dynamic Provisioning - -Dynamic provisioning can be used to auto-create volumes when needed. They require a Storage Class, a Persistent Volume Claim, and a Pod. - -#### Storage Class - -Kubernetes administrators can use storage classes to define different types of storage made available within the cluster. Each storage class definition specifies a provisioner type and any parameters needed to access it, as well as any other configuration. - -StorageOS supports the following storage class parameters: - -* `pool`: The name of the StorageOS distributed capacity pool to provision the volume from. Uses the `default` pool which is normally present if not specified. -* `description`: The description to assign to volumes that were created dynamically. All volume descriptions will be the same for the storage class, but different storage classes can be used to allow descriptions for different use cases. Defaults to `Kubernetes volume`. -* `fsType`: The default filesystem type to request. Note that user-defined rules within StorageOS may override this value. Defaults to `ext4`. 
-* `adminSecretNamespace`: The namespace where the API configuration secret is located. Required if adminSecretName set. -* `adminSecretName`: The name of the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. - -1. Create storage class - - Example spec: - - ```yaml - kind: StorageClass - apiVersion: storage.k8s.io/v1 - metadata: - name: sc-fast - provisioner: kubernetes.io/storageos - parameters: - pool: default - description: Kubernetes volume - fsType: ext4 - adminSecretNamespace: default - adminSecretName: storageos-secret - ``` - - [Download example](storageos-sc.yaml?raw=true) - - Create the storage class: - - ```bash - $ kubectl create -f examples/volumes/storageos/storageos-sc.yaml - ``` - - Verify the storage class has been created: - - ```bash - $ kubectl describe storageclass fast - Name: fast - IsDefaultClass: No - Annotations: - Provisioner: kubernetes.io/storageos - Parameters: description=Kubernetes volume,fsType=ext4,pool=default,secretName=storageos-secret - No events. - ``` - -1. 
Create persistent volume claim - - Example spec: - - ```yaml - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: fast0001 - spec: - storageClassName: fast - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 5Gi - ``` - - Create the persistent volume claim (pvc): - - ```bash - $ kubectl create -f examples/volumes/storageos/storageos-sc-pvc.yaml - ``` - - Verify the pvc has been created: - - ```bash - $ kubectl describe pvc fast0001 - Name: fast0001 - Namespace: default - StorageClass: fast - Status: Bound - Volume: pvc-480952e7-f8e0-11e6-af8c-08002736b526 - Labels: - Capacity: 5Gi - Access Modes: RWO - Events: - - ``` - - A new persistent volume will also be created and bound to the pvc: - - ```bash - $ kubectl describe pv pvc-480952e7-f8e0-11e6-af8c-08002736b526 - Name: pvc-480952e7-f8e0-11e6-af8c-08002736b526 - Labels: storageos.driver=filesystem - StorageClass: fast - Status: Bound - Claim: default/fast0001 - Reclaim Policy: Delete - Access Modes: RWO - Capacity: 5Gi - Message: - Source: - Type: StorageOS (a StorageOS Persistent Disk resource) - VolumeName: pvc-480952e7-f8e0-11e6-af8c-08002736b526 - Namespace: default - FSType: ext4 - ReadOnly: false - No events. - ``` - -1. 
Create pod which uses the persistent volume claim - - Example spec: - - ```yaml - apiVersion: v1 - kind: Pod - metadata: - labels: - name: redis - role: master - name: test-storageos-redis-sc-pvc - spec: - containers: - - name: master - image: kubernetes/redis:v1 - env: - - name: MASTER - value: "true" - ports: - - containerPort: 6379 - resources: - limits: - cpu: "0.1" - volumeMounts: - - mountPath: /redis-master-data - name: redis-data - volumes: - - name: redis-data - persistentVolumeClaim: - claimName: fast0001 - ``` - - [Download example](storageos-sc-pvcpod.yaml?raw=true) - - Create the pod: - - ```bash - $ kubectl create -f examples/volumes/storageos/storageos-sc-pvcpod.yaml - ``` - - Verify that the pod has been created: - - ```bash - $ kubectl get pods - NAME READY STATUS RESTARTS AGE - test-storageos-redis-sc-pvc 1/1 Running 0 44s - ``` - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/storageos/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/storageos/README.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/storageos/README.md) diff --git a/examples/volumes/vsphere/README.md b/examples/volumes/vsphere/README.md index f049acf71bd..11c6e1abeae 100644 --- a/examples/volumes/vsphere/README.md +++ b/examples/volumes/vsphere/README.md @@ -1,674 +1 @@ -# vSphere Volume - - - [Prerequisites](#prerequisites) - - [Examples](#examples) - - [Volumes](#volumes) - - [Persistent Volumes](#persistent-volumes) - - [Storage Class](#storage-class) - - [Storage Policy Management inside kubernetes] (#storage-policy-management-inside-kubernetes) - - [Using existing vCenter SPBM policy] (#using-existing-vcenter-spbm-policy) - - [Virtual SAN policy support](#virtual-san-policy-support) - - [Stateful Set](#stateful-set) - -## Prerequisites - -- Kubernetes with vSphere Cloud Provider configured. 
- For cloudprovider configuration please refer [vSphere getting started guide](http://kubernetes.io/docs/getting-started-guides/vsphere/). - -## Examples - -### Volumes - - 1. Create VMDK. - - First ssh into ESX and then use following command to create vmdk, - - ```shell - vmkfstools -c 2G /vmfs/volumes/datastore1/volumes/myDisk.vmdk - ``` - - 2. Create Pod which uses 'myDisk.vmdk'. - - See example - - ```yaml - apiVersion: v1 - kind: Pod - metadata: - name: test-vmdk - spec: - containers: - - image: gcr.io/google_containers/test-webserver - name: test-container - volumeMounts: - - mountPath: /test-vmdk - name: test-volume - volumes: - - name: test-volume - # This VMDK volume must already exist. - vsphereVolume: - volumePath: "[datastore1] volumes/myDisk" - fsType: ext4 - ``` - - [Download example](vsphere-volume-pod.yaml?raw=true) - - Creating the pod: - - ``` bash - $ kubectl create -f examples/volumes/vsphere/vsphere-volume-pod.yaml - ``` - - Verify that pod is running: - - ```bash - $ kubectl get pods test-vmdk - NAME READY STATUS RESTARTS AGE - test-vmdk 1/1 Running 0 48m - ``` - -### Persistent Volumes - - 1. Create VMDK. - - First ssh into ESX and then use following command to create vmdk, - - ```shell - vmkfstools -c 2G /vmfs/volumes/datastore1/volumes/myDisk.vmdk - ``` - - 2. Create Persistent Volume. - - See example: - - ```yaml - apiVersion: v1 - kind: PersistentVolume - metadata: - name: pv0001 - spec: - capacity: - storage: 2Gi - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Retain - vsphereVolume: - volumePath: "[datastore1] volumes/myDisk" - fsType: ext4 - ``` - In the above example datastore1 is located in the root folder. If datastore is member of Datastore Cluster or located in sub folder, the folder path needs to be provided in the VolumePath as below. 
- ```yaml - vsphereVolume: - VolumePath: "[DatastoreCluster/datastore1] volumes/myDisk" - ``` - - [Download example](vsphere-volume-pv.yaml?raw=true) - - Creating the persistent volume: - - ``` bash - $ kubectl create -f examples/volumes/vsphere/vsphere-volume-pv.yaml - ``` - - Verifying persistent volume is created: - - ``` bash - $ kubectl describe pv pv0001 - Name: pv0001 - Labels: - Status: Available - Claim: - Reclaim Policy: Retain - Access Modes: RWO - Capacity: 2Gi - Message: - Source: - Type: vSphereVolume (a Persistent Disk resource in vSphere) - VolumePath: [datastore1] volumes/myDisk - FSType: ext4 - No events. - ``` - - 3. Create Persistent Volume Claim. - - See example: - - ```yaml - kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: pvc0001 - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi - ``` - - [Download example](vsphere-volume-pvc.yaml?raw=true) - - Creating the persistent volume claim: - - ``` bash - $ kubectl create -f examples/volumes/vsphere/vsphere-volume-pvc.yaml - ``` - - Verifying persistent volume claim is created: - - ``` bash - $ kubectl describe pvc pvc0001 - Name: pvc0001 - Namespace: default - Status: Bound - Volume: pv0001 - Labels: - Capacity: 2Gi - Access Modes: RWO - No events. - ``` - - 3. Create Pod which uses Persistent Volume Claim. 
- - See example: - - ```yaml - apiVersion: v1 - kind: Pod - metadata: - name: pvpod - spec: - containers: - - name: test-container - image: gcr.io/google_containers/test-webserver - volumeMounts: - - name: test-volume - mountPath: /test-vmdk - volumes: - - name: test-volume - persistentVolumeClaim: - claimName: pvc0001 - ``` - - [Download example](vsphere-volume-pvcpod.yaml?raw=true) - - Creating the pod: - - ``` bash - $ kubectl create -f examples/volumes/vsphere/vsphere-volume-pvcpod.yaml - ``` - - Verifying pod is created: - - ``` bash - $ kubectl get pod pvpod - NAME READY STATUS RESTARTS AGE - pvpod 1/1 Running 0 48m - ``` - -### Storage Class - - __Note: Here you don't need to create vmdk it is created for you.__ - 1. Create Storage Class. - - Example 1: - - ```yaml - kind: StorageClass - apiVersion: storage.k8s.io/v1beta1 - metadata: - name: fast - provisioner: kubernetes.io/vsphere-volume - parameters: - diskformat: zeroedthick - fstype: ext3 - ``` - - [Download example](vsphere-volume-sc-fast.yaml?raw=true) - - You can also specify the datastore in the Storageclass as shown in example 2. The volume will be created on the datastore specified in the storage class. - This field is optional. If not specified as shown in example 1, the volume will be created on the datastore specified in the vsphere config file used to initialize the vSphere Cloud Provider. - - Example 2: - - ```yaml - kind: StorageClass - apiVersion: storage.k8s.io/v1beta1 - metadata: - name: fast - provisioner: kubernetes.io/vsphere-volume - parameters: - diskformat: zeroedthick - datastore: VSANDatastore - ``` - If datastore is member of DataStore Cluster or within some sub folder, the datastore folder path needs to be provided in the datastore parameter as below. 
- - ```yaml - parameters: - datastore: DatastoreCluster/VSANDatastore - ``` - - [Download example](vsphere-volume-sc-with-datastore.yaml?raw=true) - Creating the storageclass: - - ``` bash - $ kubectl create -f examples/volumes/vsphere/vsphere-volume-sc-fast.yaml - ``` - - Verifying storage class is created: - - ``` bash - $ kubectl describe storageclass fast - Name: fast - IsDefaultClass: No - Annotations: - Provisioner: kubernetes.io/vsphere-volume - Parameters: diskformat=zeroedthick,fstype=ext3 - No events. - ``` - - 2. Create Persistent Volume Claim. - - See example: - - ```yaml - kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: pvcsc001 - annotations: - volume.beta.kubernetes.io/storage-class: fast - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi - ``` - - [Download example](vsphere-volume-pvcsc.yaml?raw=true) - - Creating the persistent volume claim: - - ``` bash - $ kubectl create -f examples/volumes/vsphere/vsphere-volume-pvcsc.yaml - ``` - - Verifying persistent volume claim is created: - - ``` bash - $ kubectl describe pvc pvcsc001 - Name: pvcsc001 - Namespace: default - StorageClass: fast - Status: Bound - Volume: pvc-83295256-f8e0-11e6-8263-005056b2349c - Labels: - Capacity: 2Gi - Access Modes: RWO - Events: - FirstSeen LastSeen Count From SubObjectPath Type Reason Message - --------- -------- ----- ---- ------------- -------- ------ ------- - 1m 1m 1 persistentvolume-controller Normal ProvisioningSucceeded Successfully provisioned volume pvc-83295256-f8e0-11e6-8263-005056b2349c using kubernetes.io/vsphere-volume - - ``` - - Persistent Volume is automatically created and is bounded to this pvc. 
- - Verifying persistent volume claim is created: - - ``` bash - $ kubectl describe pv pvc-83295256-f8e0-11e6-8263-005056b2349c - Name: pvc-83295256-f8e0-11e6-8263-005056b2349c - Labels: - StorageClass: fast - Status: Bound - Claim: default/pvcsc001 - Reclaim Policy: Delete - Access Modes: RWO - Capacity: 2Gi - Message: - Source: - Type: vSphereVolume (a Persistent Disk resource in vSphere) - VolumePath: [datastore1] kubevols/kubernetes-dynamic-pvc-83295256-f8e0-11e6-8263-005056b2349c.vmdk - FSType: ext3 - No events. - ``` - - __Note: VMDK is created inside ```kubevols``` folder in datastore which is mentioned in 'vsphere' cloudprovider configuration. - The cloudprovider config is created during setup of Kubernetes cluster on vSphere.__ - - 3. Create Pod which uses Persistent Volume Claim with storage class. - - See example: - - ```yaml - apiVersion: v1 - kind: Pod - metadata: - name: pvpod - spec: - containers: - - name: test-container - image: gcr.io/google_containers/test-webserver - volumeMounts: - - name: test-volume - mountPath: /test-vmdk - volumes: - - name: test-volume - persistentVolumeClaim: - claimName: pvcsc001 - ``` - - [Download example](vsphere-volume-pvcscpod.yaml?raw=true) - - Creating the pod: - - ``` bash - $ kubectl create -f examples/volumes/vsphere/vsphere-volume-pvcscpod.yaml - ``` - - Verifying pod is created: - - ``` bash - $ kubectl get pod pvpod - NAME READY STATUS RESTARTS AGE - pvpod 1/1 Running 0 48m - ``` - -### Storage Policy Management inside kubernetes -#### Using existing vCenter SPBM policy - Admins can use the existing vCenter Storage Policy Based Management (SPBM) policy to configure a persistent volume with the SPBM policy. - - __Note: Here you don't need to create persistent volume it is created for you.__ - 1. Create Storage Class. 
- - Example 1: - - ```yaml - kind: StorageClass - apiVersion: storage.k8s.io/v1 - metadata: - name: fast - provisioner: kubernetes.io/vsphere-volume - parameters: - diskformat: zeroedthick - storagePolicyName: gold - ``` - [Download example](vsphere-volume-spbm-policy.yaml?raw=true) - - The admin specifies the SPBM policy - "gold" as part of storage class definition for dynamic volume provisioning. When a PVC is created, the persistent volume will be provisioned on a compatible datastore with maximum free space that satisfies the "gold" storage policy requirements. - - Example 2: - - ```yaml - kind: StorageClass - apiVersion: storage.k8s.io/v1 - metadata: - name: fast - provisioner: kubernetes.io/vsphere-volume - parameters: - diskformat: zeroedthick - storagePolicyName: gold - datastore: VSANDatastore - ``` - [Download example](vsphere-volume-spbm-policy-with-datastore.yaml?raw=true) - - The admin can also specify a custom datastore where he wants the volume to be provisioned along with the SPBM policy name. When a PVC is created, the vSphere Cloud Provider checks if the user specified datastore satisfies the "gold" storage policy requirements. If yes, it will provision the persistent volume on user specified datastore. If not, it will error out to the user that the user specified datastore is not compatible with "gold" storage policy requirements. - -#### Virtual SAN policy support - - Vsphere Infrastructure(VI) Admins will have the ability to specify custom Virtual SAN Storage Capabilities during dynamic volume provisioning. You can now define storage requirements, such as performance and availability, in the form of storage capabilities during dynamic volume provisioning. The storage capability requirements are converted into a Virtual SAN policy which are then pushed down to the Virtual SAN layer when a persistent volume (virtual disk) is being created. The virtual disk is distributed across the Virtual SAN datastore to meet the requirements. 
- - The official [VSAN policy documentation](https://pubs.vmware.com/vsphere-65/index.jsp?topic=%2Fcom.vmware.vsphere.virtualsan.doc%2FGUID-08911FD3-2462-4C1C-AE81-0D4DBC8F7990.html) describes in detail about each of the individual storage capabilities that are supported by VSAN. The user can specify these storage capabilities as part of storage class defintion based on his application needs. - - The policy settings can be one or more of the following: - - * *hostFailuresToTolerate*: represents NumberOfFailuresToTolerate - * *diskStripes*: represents NumberofDiskStripesPerObject - * *objectSpaceReservation*: represents ObjectSpaceReservation - * *cacheReservation*: represents FlashReadCacheReservation - * *iopsLimit*: represents IOPSLimitForObject - * *forceProvisioning*: represents if volume must be Force Provisioned - - __Note: Here you don't need to create persistent volume it is created for you.__ - 1. Create Storage Class. - - Example 1: - - ```yaml - kind: StorageClass - apiVersion: storage.k8s.io/v1beta1 - metadata: - name: fast - provisioner: kubernetes.io/vsphere-volume - parameters: - diskformat: zeroedthick - hostFailuresToTolerate: "2" - cachereservation: "20" - ``` - [Download example](vsphere-volume-sc-vsancapabilities.yaml?raw=true) - - Here a persistent volume will be created with the Virtual SAN capabilities - hostFailuresToTolerate to 2 and cachereservation is 20% read cache reserved for storage object. Also the persistent volume will be *zeroedthick* disk. - The official [VSAN policy documentation](https://pubs.vmware.com/vsphere-65/index.jsp?topic=%2Fcom.vmware.vsphere.virtualsan.doc%2FGUID-08911FD3-2462-4C1C-AE81-0D4DBC8F7990.html) describes in detail about each of the individual storage capabilities that are supported by VSAN and can be configured on the virtual disk. - - You can also specify the datastore in the Storageclass as shown in example 2. The volume will be created on the datastore specified in the storage class. 
- This field is optional. If not specified as shown in example 1, the volume will be created on the datastore specified in the vsphere config file used to initialize the vSphere Cloud Provider. - - Example 2: - - ```yaml - kind: StorageClass - apiVersion: storage.k8s.io/v1beta1 - metadata: - name: fast - provisioner: kubernetes.io/vsphere-volume - parameters: - diskformat: zeroedthick - datastore: VSANDatastore - hostFailuresToTolerate: "2" - cachereservation: "20" - ``` - - [Download example](vsphere-volume-sc-vsancapabilities-with-datastore.yaml?raw=true) - - __Note: If you do not apply a storage policy during dynamic provisioning on a VSAN datastore, it will use a default Virtual SAN policy.__ - - Creating the storageclass: - - ``` bash - $ kubectl create -f examples/volumes/vsphere/vsphere-volume-sc-vsancapabilities.yaml - ``` - - Verifying storage class is created: - - ``` bash - $ kubectl describe storageclass fast - Name: fast - Annotations: - Provisioner: kubernetes.io/vsphere-volume - Parameters: diskformat=zeroedthick, hostFailuresToTolerate="2", cachereservation="20" - No events. - ``` - - 2. Create Persistent Volume Claim. - - See example: - - ```yaml - kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: pvcsc-vsan - annotations: - volume.beta.kubernetes.io/storage-class: fast - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi - ``` - - [Download example](vsphere-volume-pvcsc.yaml?raw=true) - - Creating the persistent volume claim: - - ``` bash - $ kubectl create -f examples/volumes/vsphere/vsphere-volume-pvcsc.yaml - ``` - - Verifying persistent volume claim is created: - - ``` bash - $ kubectl describe pvc pvcsc-vsan - Name: pvcsc-vsan - Namespace: default - Status: Bound - Volume: pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d - Labels: - Capacity: 2Gi - Access Modes: RWO - No events. - ``` - - Persistent Volume is automatically created and is bounded to this pvc. 
- - Verifying persistent volume claim is created: - - ``` bash - $ kubectl describe pv pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d - Name: pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d - Labels: - Status: Bound - Claim: default/pvcsc-vsan - Reclaim Policy: Delete - Access Modes: RWO - Capacity: 2Gi - Message: - Source: - Type: vSphereVolume (a Persistent Disk resource in vSphere) - VolumePath: [VSANDatastore] kubevols/kubernetes-dynamic-pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d.vmdk - FSType: ext4 - No events. - ``` - - __Note: VMDK is created inside ```kubevols``` folder in datastore which is mentioned in 'vsphere' cloudprovider configuration. - The cloudprovider config is created during setup of Kubernetes cluster on vSphere.__ - - 3. Create Pod which uses Persistent Volume Claim with storage class. - - See example: - - ```yaml - apiVersion: v1 - kind: Pod - metadata: - name: pvpod - spec: - containers: - - name: test-container - image: gcr.io/google_containers/test-webserver - volumeMounts: - - name: test-volume - mountPath: /test - volumes: - - name: test-volume - persistentVolumeClaim: - claimName: pvcsc-vsan - ``` - - [Download example](vsphere-volume-pvcscpod.yaml?raw=true) - - Creating the pod: - - ``` bash - $ kubectl create -f examples/volumes/vsphere/vsphere-volume-pvcscpod.yaml - ``` - - Verifying pod is created: - - ``` bash - $ kubectl get pod pvpod - NAME READY STATUS RESTARTS AGE - pvpod 1/1 Running 0 48m - ``` - -### Stateful Set - -vSphere volumes can be consumed by Stateful Sets. - - 1. Create a storage class that will be used by the ```volumeClaimTemplates``` of a Stateful Set. - - See example: - - ```yaml - kind: StorageClass - apiVersion: storage.k8s.io/v1beta1 - metadata: - name: thin-disk - provisioner: kubernetes.io/vsphere-volume - parameters: - diskformat: thin - ``` - - [Download example](simple-storageclass.yaml) - - 2. Create a Stateful set that consumes storage from the Storage Class created. 
- - See example: - ```yaml - --- - apiVersion: v1 - kind: Service - metadata: - name: nginx - labels: - app: nginx - spec: - ports: - - port: 80 - name: web - clusterIP: None - selector: - app: nginx - --- - apiVersion: apps/v1beta1 - kind: StatefulSet - metadata: - name: web - spec: - serviceName: "nginx" - replicas: 14 - template: - metadata: - labels: - app: nginx - spec: - containers: - - name: nginx - image: gcr.io/google_containers/nginx-slim:0.8 - ports: - - containerPort: 80 - name: web - volumeMounts: - - name: www - mountPath: /usr/share/nginx/html - volumeClaimTemplates: - - metadata: - name: www - annotations: - volume.beta.kubernetes.io/storage-class: thin-disk - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 1Gi - ``` - This will create Persistent Volume Claims for each replica and provision a volume for each claim if an existing volume could be bound to the claim. - - [Download example](simple-statefulset.yaml) - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/vsphere/README.md?pixel)]() - +This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/volumes/vsphere/README.md](https://github.com/kubernetes/examples/blob/master/staging/volumes/vsphere/README.md) diff --git a/federation/cluster/BUILD b/federation/cluster/BUILD index 500bdb28821..6cc62052baf 100644 --- a/federation/cluster/BUILD +++ b/federation/cluster/BUILD @@ -2,24 +2,6 @@ package(default_visibility = ["//visibility:public"]) licenses(["notice"]) -load( - "@io_bazel_rules_go//go:def.bzl", - "go_binary", - "go_library", -) - -go_binary( - name = "cluster", - library = ":go_default_library", - tags = ["automanaged"], -) - -go_library( - name = "go_default_library", - srcs = ["template.go"], - tags = ["automanaged"], -) - filegroup( name = "package-srcs", srcs = glob(["**"]), diff --git a/federation/cluster/common.sh b/federation/cluster/common.sh index d0610a6523b..e9a8ba088d0 100644 --- 
a/federation/cluster/common.sh +++ b/federation/cluster/common.sh @@ -46,11 +46,6 @@ if [[ -z "${HOST_CLUSTER_CONTEXT}" ]]; then HOST_CLUSTER_CONTEXT="${CLUSTER_CONTEXT}" fi -# kube-dns configuration. -KUBEDNS_CONFIGMAP_NAME="kube-dns" -KUBEDNS_CONFIGMAP_NAMESPACE="kube-system" -KUBEDNS_FEDERATION_FLAG="federations" - function federation_cluster_contexts() { local -r contexts=$("${KUBE_ROOT}/cluster/kubectl.sh" config get-contexts -o name) federation_contexts=() @@ -69,347 +64,10 @@ function federation_cluster_contexts() { } -#-----------------------------------------------------------------# -# NOTE: # -# Everything below this line is deprecated. It will be removed # -# once we have sufficient confidence in kubefed based testing. # -#-----------------------------------------------------------------# - -# optional override -# FEDERATION_IMAGE_REPO_BASE: repo which federated images are tagged under (default gcr.io/google_containers) -# FEDERATION_NAMESPACE: name of the namespace will created for the federated components in the underlying cluster. -# KUBE_PLATFORM -# KUBE_ARCH -# KUBE_BUILD_STAGE - source "${KUBE_ROOT}/cluster/common.sh" host_kubectl="${KUBE_ROOT}/cluster/kubectl.sh --namespace=${FEDERATION_NAMESPACE}" -# If $FEDERATION_PUSH_REPO_BASE isn't set, then set the GCR registry name -# based on the detected project name for gce and gke providers. -FEDERATION_PUSH_REPO_BASE=${FEDERATION_PUSH_REPO_BASE:-} -if [[ -z "${FEDERATION_PUSH_REPO_BASE}" ]]; then - if [[ "${KUBERNETES_PROVIDER}" == "gke" || "${KUBERNETES_PROVIDER}" == "gce" ]]; then - # Populates $PROJECT - detect-project - if [[ ${PROJECT} == *':'* ]]; then - echo "${PROJECT} contains ':' and can not be used as FEDERATION_PUSH_REPO_BASE. Please set FEDERATION_PUSH_REPO_BASE explicitly." 
- exit 1 - fi - FEDERATION_PUSH_REPO_BASE=gcr.io/${PROJECT} - else - echo "Must set FEDERATION_PUSH_REPO_BASE env var" - exit 1 - fi -fi - -FEDERATION_IMAGE_REPO_BASE=${FEDERATION_IMAGE_REPO_BASE:-'gcr.io/google_containers'} - -KUBE_PLATFORM=${KUBE_PLATFORM:-linux} -KUBE_ARCH=${KUBE_ARCH:-amd64} -KUBE_BUILD_STAGE=${KUBE_BUILD_STAGE:-release-stage} - -source "${KUBE_ROOT}/cluster/common.sh" - -host_kubectl="${KUBE_ROOT}/cluster/kubectl.sh --namespace=${FEDERATION_NAMESPACE}" - -# required: -# FEDERATION_PUSH_REPO_BASE: repo to which federated container images will be pushed -# FEDERATION_IMAGE_TAG: reference and pull all federated images with this tag. -function create-federation-api-objects { -( - : "${FEDERATION_PUSH_REPO_BASE?Must set FEDERATION_PUSH_REPO_BASE env var}" - : "${FEDERATION_IMAGE_TAG?Must set FEDERATION_IMAGE_TAG env var}" - - export FEDERATION_APISERVER_DEPLOYMENT_NAME="federation-apiserver" - export FEDERATION_APISERVER_IMAGE_REPO="${FEDERATION_PUSH_REPO_BASE}/hyperkube-amd64" - export FEDERATION_APISERVER_IMAGE_TAG="${FEDERATION_IMAGE_TAG}" - - export FEDERATION_CONTROLLER_MANAGER_DEPLOYMENT_NAME="federation-controller-manager" - export FEDERATION_CONTROLLER_MANAGER_IMAGE_REPO="${FEDERATION_PUSH_REPO_BASE}/hyperkube-amd64" - export FEDERATION_CONTROLLER_MANAGER_IMAGE_TAG="${FEDERATION_IMAGE_TAG}" - - if [[ -z "${FEDERATION_DNS_PROVIDER:-}" ]]; then - # Set the appropriate value based on cloud provider. 
- if [[ "$KUBERNETES_PROVIDER" == "gce" || "${KUBERNETES_PROVIDER}" == "gke" ]]; then - echo "setting dns provider to google-clouddns" - export FEDERATION_DNS_PROVIDER="google-clouddns" - elif [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then - echo "setting dns provider to aws-route53" - export FEDERATION_DNS_PROVIDER="aws-route53" - else - echo "Must set FEDERATION_DNS_PROVIDER env var" - exit 1 - fi - fi - - export FEDERATION_SERVICE_CIDR=${FEDERATION_SERVICE_CIDR:-"10.10.0.0/24"} - - #Only used for providers that require a nodeport service (vagrant for now) - #We will use loadbalancer services where we can - export FEDERATION_API_NODEPORT=32111 - export FEDERATION_NAMESPACE - export FEDERATION_NAME="${FEDERATION_NAME:-federation}" - export DNS_ZONE_NAME="${DNS_ZONE_NAME:-federation.example.}" # See https://tools.ietf.org/html/rfc2606 - - template="go run ${KUBE_ROOT}/federation/cluster/template.go" - - FEDERATION_KUBECONFIG_PATH="${KUBE_ROOT}/federation/cluster/kubeconfig" - - federation_kubectl="${KUBE_ROOT}/cluster/kubectl.sh --context=federation-cluster --namespace=default" - - manifests_root="${KUBE_ROOT}/federation/manifests/" - - $template "${manifests_root}/federation-ns.yaml" | $host_kubectl apply -f - - - cleanup-federation-api-objects - - export FEDERATION_API_HOST="" - export KUBE_MASTER_IP="" - export IS_DNS_NAME="false" - if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]];then - # The vagrant approach is to use a nodeport service, and point kubectl at one of the nodes - $template "${manifests_root}/federation-apiserver-nodeport-service.yaml" | $host_kubectl create -f - - node_addresses=`$host_kubectl get nodes -o=jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}'` - FEDERATION_API_HOST=`printf "$node_addresses" | cut -d " " -f1` - KUBE_MASTER_IP="${FEDERATION_API_HOST}:${FEDERATION_API_NODEPORT}" - elif [[ "$KUBERNETES_PROVIDER" == "gce" || "$KUBERNETES_PROVIDER" == "gke" || "$KUBERNETES_PROVIDER" == "aws" ]];then - - # Any providers 
where ingress is a DNS name should tick this box. - # TODO(chom): attempt to do this automatically - if [[ "$KUBERNETES_PROVIDER" == "aws" ]];then - IS_DNS_NAME="true" - fi - # any capable providers should use a loadbalancer service - # we check for ingress.ip and ingress.hostname, so should work for any loadbalancer-providing provider - # allows 30x5 = 150 seconds for loadbalancer creation - $template "${manifests_root}/federation-apiserver-lb-service.yaml" | $host_kubectl create -f - - for i in {1..30};do - echo "attempting to get federation-apiserver loadbalancer hostname ($i / 30)" - LB_STATUS=`${host_kubectl} get -o=jsonpath svc/${FEDERATION_APISERVER_DEPLOYMENT_NAME} --template '{.status.loadBalancer}'` - # Check if ingress field has been set in load balancer status. - if [[ "${LB_STATUS}" != *"ingress"* ]]; then - echo "Waiting for load balancer status to be set" - sleep 5 - continue - fi - for field in ip hostname;do - FEDERATION_API_HOST=`${host_kubectl} get -o=jsonpath svc/${FEDERATION_APISERVER_DEPLOYMENT_NAME} --template '{.status.loadBalancer.ingress[*].'"${field}}"` - if [[ ! -z "${FEDERATION_API_HOST// }" ]];then - break 2 - fi - done - if [[ $i -eq 30 ]];then - echo "Could not find ingress hostname for federation-apiserver loadbalancer service" - exit 1 - fi - sleep 5 - done - KUBE_MASTER_IP="${FEDERATION_API_HOST}:443" - else - echo "provider ${KUBERNETES_PROVIDER} is not (yet) supported for e2e testing" - exit 1 - fi - echo "Found federation-apiserver host at $FEDERATION_API_HOST" - - FEDERATION_API_TOKEN="$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)" - export FEDERATION_API_KNOWN_TOKENS="${FEDERATION_API_TOKEN},admin,admin" - gen-kube-basicauth - export FEDERATION_API_BASIC_AUTH="${KUBE_PASSWORD},${KUBE_USER},admin" - - # Create a kubeconfig with credentials for federation-apiserver. 
We will - # then use this kubeconfig to create a secret which the federation - # controller manager can use to talk to the federation-apiserver. - # Note that the file name should be "kubeconfig" so that the secret key gets the same name. - KUBECONFIG_DIR=$(dirname ${KUBECONFIG:-$DEFAULT_KUBECONFIG}) - CONTEXT=${FEDERATION_KUBE_CONTEXT} \ - KUBE_BEARER_TOKEN="$FEDERATION_API_TOKEN" \ - KUBE_USER="${KUBE_USER}" \ - KUBE_PASSWORD="${KUBE_PASSWORD}" \ - KUBECONFIG="${KUBECONFIG_DIR}/federation/federation-apiserver/kubeconfig" \ - create-kubeconfig - - # Create secret with federation-apiserver's kubeconfig - $host_kubectl create secret generic federation-apiserver-kubeconfig --from-file="${KUBECONFIG_DIR}/federation/federation-apiserver/kubeconfig" --namespace="${FEDERATION_NAMESPACE}" - - # Create secrets with all the kubernetes-apiserver's kubeconfigs. - # Note: This is used only by the test setup (where kubernetes clusters are - # brought up with FEDERATION=true). Users are expected to create this secret - # themselves. - for dir in ${KUBECONFIG_DIR}/federation/kubernetes-apiserver/*; do - # We create a secret with the same name as the directory name (which is - # same as cluster name in kubeconfig). - # Massage the name so that it is valid (should not contain "_" and max 253 - # chars) - name=$(basename $dir) - name=$(echo "$name" | sed -e "s/_/-/g") # Replace "_" by "-" - name=${name:0:252} - echo "Creating secret with name: $name" - $host_kubectl create secret generic ${name} --from-file="${dir}/kubeconfig" --namespace="${FEDERATION_NAMESPACE}" - done - - # Create server certificates. 
- kube::util::ensure-temp-dir - echo "Creating federation apiserver certs for federation api host: ${FEDERATION_API_HOST} ( is this a dns name?: ${IS_DNS_NAME} )" - MASTER_NAME="federation-apiserver" create-federation-apiserver-certs ${FEDERATION_API_HOST} - export FEDERATION_APISERVER_CA_CERT_BASE64="${FEDERATION_APISERVER_CA_CERT_BASE64}" - export FEDERATION_APISERVER_CERT_BASE64="${FEDERATION_APISERVER_CERT_BASE64}" - export FEDERATION_APISERVER_KEY_BASE64="${FEDERATION_APISERVER_KEY_BASE64}" - - # Enable the NamespaceLifecycle admission control by default. - export FEDERATION_ADMISSION_CONTROL="${FEDERATION_ADMISSION_CONTROL:-NamespaceLifecycle}" - - for file in federation-etcd-pvc.yaml federation-apiserver-{deployment,secrets}.yaml federation-controller-manager-deployment.yaml; do - echo "Creating manifest: ${file}" - $template "${manifests_root}/${file}" - $template "${manifests_root}/${file}" | $host_kubectl create -f - - done - - # Update the users kubeconfig to include federation-apiserver credentials. - CONTEXT=${FEDERATION_KUBE_CONTEXT} \ - KUBE_BEARER_TOKEN="${FEDERATION_API_TOKEN}" \ - KUBE_USER="${KUBE_USER}" \ - KUBE_PASSWORD="${KUBE_PASSWORD}" \ - SECONDARY_KUBECONFIG=true \ - create-kubeconfig - - # Don't finish provisioning until federation-apiserver pod is running - for i in {1..30};do - #TODO(colhom): in the future this needs to scale out for N pods. This assumes just one pod - phase="$($host_kubectl get -o=jsonpath pods -lapp=federated-cluster,module=federation-apiserver --template '{.items[*].status.phase}')" - echo "Waiting for federation-apiserver to be running...(phase= $phase)" - if [[ "$phase" == "Running" ]];then - echo "federation-apiserver pod is running!" - break - fi - - if [[ $i -eq 30 ]];then - echo "federation-apiserver pod is not running! giving up." - exit 1 - fi - - sleep 4 - done - - # Verify that federation-controller-manager pod is running. 
- for i in {1..30};do - #TODO(colhom): in the future this needs to scale out for N pods. This assumes just one pod - phase="$($host_kubectl get -o=jsonpath pods -lapp=federated-cluster,module=federation-controller-manager --template '{.items[*].status.phase}')" - echo "Waiting for federation-controller-manager to be running...(phase= $phase)" - if [[ "$phase" == "Running" ]];then - echo "federation-controller-manager pod is running!" - break - fi - - if [[ $i -eq 30 ]];then - echo "federation-controller-manager pod is not running! giving up." - exit 1 - fi - - sleep 4 - done -) -} - -# Creates the required certificates for federation apiserver. -# $1: The public IP or DNS name for the master. -# -# Assumed vars -# KUBE_TEMP -# MASTER_NAME -# IS_DNS_NAME=true|false -function create-federation-apiserver-certs { - local primary_cn - local sans - - if [[ "${IS_DNS_NAME:-}" == "true" ]];then - primary_cn="$(printf "${1}" | sha1sum | tr " -" " ")" - sans="DNS:${1},DNS:${MASTER_NAME}" - else - primary_cn="${1}" - sans="IP:${1},DNS:${MASTER_NAME}" - fi - - echo "Generating certs for alternate-names: ${sans}" - - local kube_temp="${KUBE_TEMP}/federation" - mkdir -p "${kube_temp}" - KUBE_TEMP="${kube_temp}" PRIMARY_CN="${primary_cn}" SANS="${sans}" generate-certs - - local cert_dir="${kube_temp}/easy-rsa-master/easyrsa3" - # By default, linux wraps base64 output every 76 cols, so we use 'tr -d' to remove whitespaces. - # Note 'base64 -w0' doesn't work on Mac OS X, which has different flags. 
- FEDERATION_APISERVER_CA_CERT_BASE64=$(cat "${cert_dir}/pki/ca.crt" | base64 | tr -d '\r\n') - FEDERATION_APISERVER_CERT_BASE64=$(cat "${cert_dir}/pki/issued/${MASTER_NAME}.crt" | base64 | tr -d '\r\n') - FEDERATION_APISERVER_KEY_BASE64=$(cat "${cert_dir}/pki/private/${MASTER_NAME}.key" | base64 | tr -d '\r\n') -} - - -# Required -# FEDERATION_PUSH_REPO_BASE: the docker repo where federated images will be pushed -# FEDERATION_IMAGE_TAG: the tag of the image to be pushed -function push-federation-images { - : "${FEDERATION_PUSH_REPO_BASE?Must set FEDERATION_PUSH_REPO_BASE env var}" - : "${FEDERATION_IMAGE_TAG?Must set FEDERATION_IMAGE_TAG env var}" - - source "${KUBE_ROOT}/build/common.sh" - source "${KUBE_ROOT}/hack/lib/util.sh" - - local FEDERATION_BINARIES=${FEDERATION_BINARIES:-"hyperkube-amd64"} - - local bin_dir="${KUBE_ROOT}/_output/${KUBE_BUILD_STAGE}/server/${KUBE_PLATFORM}-${KUBE_ARCH}/kubernetes/server/bin" - - if [[ ! -d "${bin_dir}" ]];then - echo "${bin_dir} does not exist! Run make quick-release or make release" - exit 1 - fi - - for binary in ${FEDERATION_BINARIES}; do - local bin_path="${bin_dir}/${binary}" - - if [[ ! -f "${bin_path}" ]]; then - echo "${bin_path} does not exist!" - exit 1 - fi - - local docker_build_path="${bin_path}.dockerbuild" - local docker_file_path="${docker_build_path}/Dockerfile" - - rm -rf ${docker_build_path} - mkdir -p ${docker_build_path} - - ln "${bin_path}" "${docker_build_path}/${binary}" - printf " FROM debian:jessie \n ADD ${binary} /usr/local/bin/${binary}\n" > ${docker_file_path} - - local docker_image_tag="${FEDERATION_PUSH_REPO_BASE}/${binary}:${FEDERATION_IMAGE_TAG}" - - # Build the docker image on-the-fly. - # - # NOTE: This is only a temporary fix until the proposal in issue - # https://github.com/kubernetes/kubernetes/issues/28630 is implemented. - # Also, the new turn up mechanism completely obviates this step. - # - # TODO(madhusudancs): Remove this code when the new turn up mechanism work - # is merged. 
- kube::log::status "Building docker image ${docker_image_tag} from the binary" - docker build --pull -q -t "${docker_image_tag}" ${docker_build_path} >/dev/null - - rm -rf ${docker_build_path} - - kube::log::status "Pushing ${docker_image_tag}" - if [[ "${FEDERATION_PUSH_REPO_BASE}" == "gcr.io/"* ]]; then - echo " -> GCR repository detected. Using gcloud" - gcloud docker -- push "${docker_image_tag}" - else - docker push "${docker_image_tag}" - fi - - kube::log::status "Deleting docker image ${docker_image_tag}" - docker rmi "${docker_image_tag}" 2>/dev/null || true - done -} - function cleanup-federation-api-objects { # This is a cleanup function. We cannot stop on errors here. So disable # errexit in this function. diff --git a/federation/cluster/federation-down.sh b/federation/cluster/federation-down.sh index 8aca4580399..dfbe63edae9 100755 --- a/federation/cluster/federation-down.sh +++ b/federation/cluster/federation-down.sh @@ -21,8 +21,7 @@ set -o pipefail KUBE_ROOT=$(readlink -m $(dirname "${BASH_SOURCE}")/../../) # For $FEDERATION_NAME, $FEDERATION_NAMESPACE, $FEDERATION_KUBE_CONTEXT, -# $HOST_CLUSTER_CONTEXT, $KUBEDNS_CONFIGMAP_NAME and -# $KUBEDNS_CONFIGMAP_NAMESPACE. +# and $HOST_CLUSTER_CONTEXT. source "${KUBE_ROOT}/federation/cluster/common.sh" # federation_clusters returns a list of all the clusters in diff --git a/federation/cluster/federation-up.sh b/federation/cluster/federation-up.sh index fd20e220490..913b639bf97 100755 --- a/federation/cluster/federation-up.sh +++ b/federation/cluster/federation-up.sh @@ -30,8 +30,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. # "${KUBE_ROOT}/cluster/lib/logging.sh" and DEFAULT_KUBECONFIG source "${KUBE_ROOT}/cluster/common.sh" # For $FEDERATION_NAME, $FEDERATION_NAMESPACE, $FEDERATION_KUBE_CONTEXT, -# $HOST_CLUSTER_CONTEXT, $KUBEDNS_CONFIGMAP_NAME, -# $KUBEDNS_CONFIGMAP_NAMESPACE and $KUBEDNS_FEDERATION_FLAG. +# and $HOST_CLUSTER_CONTEXT. 
source "${KUBE_ROOT}/federation/cluster/common.sh" DNS_ZONE_NAME="${FEDERATION_DNS_ZONE_NAME:-}" @@ -69,6 +68,29 @@ print json.load(sys.stdin)["KUBE_VERSION"]')" echo "${kube_version//+/_}" } +function wait_for_rbac() { + # The very first thing that kubefed does when it comes up is run RBAC API + # discovery. If it doesn't appear to be available, issue 'get role' to ensure + # that kubectl updates its cache. + ${KUBE_ROOT}/cluster/kubectl.sh get role + local i=1 + local timeout=60 + while [[ ${i} -le ${timeout} ]]; do + if [[ "$(${KUBE_ROOT}/cluster/kubectl.sh api-versions)" =~ "rbac.authorization.k8s.io/" ]]; then + break + fi + ${KUBE_ROOT}/cluster/kubectl.sh get role + sleep 1 + i=$((i+1)) + done + if [[ ${i} -gt ${timeout} ]]; then + kube::log::status "rbac.authorization.k8s.io API group not available after at least ${timeout} seconds:" + kube::log::status "$(${KUBE_ROOT}/cluster/kubectl.sh api-versions)" + exit 123 + fi + kube::log::status "rbac.authorization.k8s.io API group is available" +} + # Initializes the control plane. # TODO(madhusudancs): Move this to federation/develop.sh. function init() { @@ -81,17 +103,7 @@ function init() { kube::log::status "DNS_ZONE_NAME: \"${DNS_ZONE_NAME}\", DNS_PROVIDER: \"${DNS_PROVIDER}\"" kube::log::status "Image: \"${kube_registry}/hyperkube-amd64:${kube_version}\"" - # The very first thing that kubefed does when it comes up is run RBAC API - # discovery. If it doesn't appear to be available, issue 'get role' to ensure - # that kubectl updates its cache. 
- ${KUBE_ROOT}/cluster/kubectl.sh get role - timeout 1m bash < test.txt -$ MYVAR=foobar go run template.go test.txt -> hello world, MYVAR=foobar - -If you want the base64 version of any MYVAR, simple use {{.MYVAR_BASE64}} -*/ - -package main - -import ( - "encoding/base64" - "flag" - "fmt" - "io" - "os" - "path" - "strings" - "text/template" -) - -func main() { - flag.Parse() - env := make(map[string]string) - envList := os.Environ() - - for i := range envList { - pieces := strings.SplitN(envList[i], "=", 2) - if len(pieces) == 2 { - env[pieces[0]] = pieces[1] - env[pieces[0]+"_BASE64"] = base64.StdEncoding.EncodeToString([]byte(pieces[1])) - } else { - fmt.Fprintf(os.Stderr, "Invalid environ found: %s\n", envList[i]) - os.Exit(2) - } - } - - for i := 0; i < flag.NArg(); i++ { - inpath := flag.Arg(i) - - if err := templateYamlFile(env, inpath, os.Stdout); err != nil { - panic(err) - } - } -} - -func templateYamlFile(params map[string]string, inpath string, out io.Writer) error { - if tmpl, err := template.New(path.Base(inpath)).Option("missingkey=zero").ParseFiles(inpath); err != nil { - return err - } else { - if err := tmpl.Execute(out, params); err != nil { - return err - } - } - _, err := out.Write([]byte("\n---\n")) - return err -} diff --git a/federation/cmd/federation-controller-manager/app/BUILD b/federation/cmd/federation-controller-manager/app/BUILD index 2be5e85b6d3..a4fffac2a2b 100644 --- a/federation/cmd/federation-controller-manager/app/BUILD +++ b/federation/cmd/federation-controller-manager/app/BUILD @@ -23,9 +23,7 @@ go_library( "//federation/pkg/dnsprovider/providers/google/clouddns:go_default_library", "//federation/pkg/federatedtypes:go_default_library", "//federation/pkg/federation-controller/cluster:go_default_library", - "//federation/pkg/federation-controller/deployment:go_default_library", "//federation/pkg/federation-controller/ingress:go_default_library", - "//federation/pkg/federation-controller/namespace:go_default_library", 
"//federation/pkg/federation-controller/service:go_default_library", "//federation/pkg/federation-controller/service/dns:go_default_library", "//federation/pkg/federation-controller/sync:go_default_library", @@ -41,7 +39,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/client-go/discovery:go_default_library", - "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", ], diff --git a/federation/cmd/federation-controller-manager/app/controllermanager.go b/federation/cmd/federation-controller-manager/app/controllermanager.go index 17b37d4004b..9144eca97f4 100644 --- a/federation/cmd/federation-controller-manager/app/controllermanager.go +++ b/federation/cmd/federation-controller-manager/app/controllermanager.go @@ -30,16 +30,13 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/server/healthz" utilflag "k8s.io/apiserver/pkg/util/flag" - "k8s.io/client-go/dynamic" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" "k8s.io/kubernetes/federation/cmd/federation-controller-manager/app/options" "k8s.io/kubernetes/federation/pkg/federatedtypes" clustercontroller "k8s.io/kubernetes/federation/pkg/federation-controller/cluster" - deploymentcontroller "k8s.io/kubernetes/federation/pkg/federation-controller/deployment" ingresscontroller "k8s.io/kubernetes/federation/pkg/federation-controller/ingress" - namespacecontroller "k8s.io/kubernetes/federation/pkg/federation-controller/namespace" servicecontroller "k8s.io/kubernetes/federation/pkg/federation-controller/service" servicednscontroller "k8s.io/kubernetes/federation/pkg/federation-controller/service/dns" synccontroller 
"k8s.io/kubernetes/federation/pkg/federation-controller/sync" @@ -152,29 +149,12 @@ func StartControllers(s *options.CMServer, restClientCfg *restclient.Config) err go serviceController.Run(s.ConcurrentServiceSyncs, wait.NeverStop) } - if controllerEnabled(s.Controllers, serverResources, namespacecontroller.ControllerName, namespacecontroller.RequiredResources, true) { - glog.V(3).Infof("Loading client config for namespace controller %q", namespacecontroller.UserAgentName) - nsClientset := federationclientset.NewForConfigOrDie(restclient.AddUserAgent(restClientCfg, namespacecontroller.UserAgentName)) - namespaceController := namespacecontroller.NewNamespaceController(nsClientset, dynamic.NewDynamicClientPool(restclient.AddUserAgent(restClientCfg, namespacecontroller.UserAgentName))) - glog.V(3).Infof("Running namespace controller") - namespaceController.Run(wait.NeverStop) - } - for kind, federatedType := range federatedtypes.FederatedTypes() { if controllerEnabled(s.Controllers, serverResources, federatedType.ControllerName, federatedType.RequiredResources, true) { synccontroller.StartFederationSyncController(kind, federatedType.AdapterFactory, restClientCfg, stopChan, minimizeLatency) } } - if controllerEnabled(s.Controllers, serverResources, deploymentcontroller.ControllerName, deploymentcontroller.RequiredResources, true) { - glog.V(3).Infof("Loading client config for deployment controller %q", deploymentcontroller.UserAgentName) - deploymentClientset := federationclientset.NewForConfigOrDie(restclient.AddUserAgent(restClientCfg, deploymentcontroller.UserAgentName)) - deploymentController := deploymentcontroller.NewDeploymentController(deploymentClientset) - glog.V(3).Infof("Running deployment controller") - // TODO: rename s.ConcurrentReplicaSetSyncs - go deploymentController.Run(s.ConcurrentReplicaSetSyncs, wait.NeverStop) - } - if controllerEnabled(s.Controllers, serverResources, ingresscontroller.ControllerName, ingresscontroller.RequiredResources, true) { 
glog.V(3).Infof("Loading client config for ingress controller %q", ingresscontroller.UserAgentName) ingClientset := federationclientset.NewForConfigOrDie(restclient.AddUserAgent(restClientCfg, ingresscontroller.UserAgentName)) diff --git a/federation/cmd/federation-controller-manager/app/options/BUILD b/federation/cmd/federation-controller-manager/app/options/BUILD index 12379369987..72e4bdfcde4 100644 --- a/federation/cmd/federation-controller-manager/app/options/BUILD +++ b/federation/cmd/federation-controller-manager/app/options/BUILD @@ -14,7 +14,7 @@ go_library( deps = [ "//federation/pkg/dnsprovider:go_default_library", "//pkg/apis/componentconfig:go_default_library", - "//pkg/client/leaderelection:go_default_library", + "//pkg/client/leaderelectionconfig:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", diff --git a/federation/cmd/federation-controller-manager/app/options/options.go b/federation/cmd/federation-controller-manager/app/options/options.go index 2a6768fcc0f..e6659c36977 100644 --- a/federation/cmd/federation-controller-manager/app/options/options.go +++ b/federation/cmd/federation-controller-manager/app/options/options.go @@ -28,7 +28,7 @@ import ( utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/kubernetes/federation/pkg/dnsprovider" "k8s.io/kubernetes/pkg/apis/componentconfig" - "k8s.io/kubernetes/pkg/client/leaderelection" + "k8s.io/kubernetes/pkg/client/leaderelectionconfig" ) type ControllerManagerConfiguration struct { @@ -98,7 +98,7 @@ func NewCMServer() *CMServer { ClusterMonitorPeriod: metav1.Duration{Duration: 40 * time.Second}, APIServerQPS: 20.0, APIServerBurst: 30, - LeaderElection: leaderelection.DefaultLeaderElectionConfiguration(), + LeaderElection: leaderelectionconfig.DefaultLeaderElectionConfiguration(), Controllers: make(utilflag.ConfigurationMap), }, } @@ -129,5 +129,5 @@ 
func (s *CMServer) AddFlags(fs *pflag.FlagSet) { "A set of key=value pairs that describe controller configuration "+ "to enable/disable specific controllers. Key should be the resource name (like services) and value should be true or false. "+ "For example: services=false,ingresses=false") - leaderelection.BindFlags(&s.LeaderElection, fs) + leaderelectionconfig.BindFlags(&s.LeaderElection, fs) } diff --git a/federation/pkg/federatedtypes/BUILD b/federation/pkg/federatedtypes/BUILD index 3391d597462..7bb01c29d64 100644 --- a/federation/pkg/federatedtypes/BUILD +++ b/federation/pkg/federatedtypes/BUILD @@ -14,6 +14,9 @@ go_library( "adapter.go", "configmap.go", "daemonset.go", + "deployment.go", + "namespace.go", + "qualifiedname.go", "registry.go", "replicaset.go", "scheduling.go", @@ -28,15 +31,20 @@ go_library( "//federation/pkg/federation-controller/util/planner:go_default_library", "//federation/pkg/federation-controller/util/podanalyzer:go_default_library", "//federation/pkg/federation-controller/util/replicapreferences:go_default_library", + "//pkg/api:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", + "//pkg/controller/namespace/deletion:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/dynamic:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", ], ) @@ -58,7 +66,7 @@ filegroup( go_test( name 
= "go_default_test", - srcs = ["replicaset_test.go"], + srcs = ["scheduling_test.go"], library = ":go_default_library", tags = ["automanaged"], deps = [ @@ -66,5 +74,6 @@ go_test( "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", ], ) diff --git a/federation/pkg/federatedtypes/adapter.go b/federation/pkg/federatedtypes/adapter.go index 1da54a75547..2e59bb90603 100644 --- a/federation/pkg/federatedtypes/adapter.go +++ b/federation/pkg/federatedtypes/adapter.go @@ -19,8 +19,8 @@ package federatedtypes import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" pkgruntime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" ) @@ -34,21 +34,21 @@ type FederatedTypeAdapter interface { IsExpectedType(obj interface{}) bool Copy(obj pkgruntime.Object) pkgruntime.Object Equivalent(obj1, obj2 pkgruntime.Object) bool - NamespacedName(obj pkgruntime.Object) types.NamespacedName + QualifiedName(obj pkgruntime.Object) QualifiedName ObjectMeta(obj pkgruntime.Object) *metav1.ObjectMeta // Fed* operations target the federation control plane FedCreate(obj pkgruntime.Object) (pkgruntime.Object, error) - FedDelete(namespacedName types.NamespacedName, options *metav1.DeleteOptions) error - FedGet(namespacedName types.NamespacedName) (pkgruntime.Object, error) + FedDelete(qualifiedName QualifiedName, options *metav1.DeleteOptions) error + FedGet(qualifiedName QualifiedName) (pkgruntime.Object, error) FedList(namespace string, options metav1.ListOptions) (pkgruntime.Object, error) FedUpdate(obj pkgruntime.Object) (pkgruntime.Object, error) 
FedWatch(namespace string, options metav1.ListOptions) (watch.Interface, error) // The following operations are intended to target a cluster that is a member of a federation ClusterCreate(client kubeclientset.Interface, obj pkgruntime.Object) (pkgruntime.Object, error) - ClusterDelete(client kubeclientset.Interface, nsName types.NamespacedName, options *metav1.DeleteOptions) error - ClusterGet(client kubeclientset.Interface, namespacedName types.NamespacedName) (pkgruntime.Object, error) + ClusterDelete(client kubeclientset.Interface, qualifiedName QualifiedName, options *metav1.DeleteOptions) error + ClusterGet(client kubeclientset.Interface, qualifiedName QualifiedName) (pkgruntime.Object, error) ClusterList(client kubeclientset.Interface, namespace string, options metav1.ListOptions) (pkgruntime.Object, error) ClusterUpdate(client kubeclientset.Interface, obj pkgruntime.Object) (pkgruntime.Object, error) ClusterWatch(client kubeclientset.Interface, namespace string, options metav1.ListOptions) (watch.Interface, error) @@ -62,7 +62,7 @@ type FederatedTypeAdapter interface { // that create instances of FederatedTypeAdapter. Such methods should // be registered with RegisterAdapterFactory to ensure the type // adapter is discoverable. 
-type AdapterFactory func(client federationclientset.Interface) FederatedTypeAdapter +type AdapterFactory func(client federationclientset.Interface, config *restclient.Config) FederatedTypeAdapter // SetAnnotation sets the given key and value in the given object's ObjectMeta.Annotations map func SetAnnotation(adapter FederatedTypeAdapter, obj pkgruntime.Object, key, value string) { @@ -75,5 +75,5 @@ func SetAnnotation(adapter FederatedTypeAdapter, obj pkgruntime.Object, key, val // ObjectKey returns a cluster-unique key for the given object func ObjectKey(adapter FederatedTypeAdapter, obj pkgruntime.Object) string { - return adapter.NamespacedName(obj).String() + return adapter.QualifiedName(obj).String() } diff --git a/federation/pkg/federatedtypes/configmap.go b/federation/pkg/federatedtypes/configmap.go index a0de1888b71..bc2c2ac3eff 100644 --- a/federation/pkg/federatedtypes/configmap.go +++ b/federation/pkg/federatedtypes/configmap.go @@ -21,8 +21,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" "k8s.io/kubernetes/federation/pkg/federation-controller/util" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -41,7 +41,7 @@ type ConfigMapAdapter struct { client federationclientset.Interface } -func NewConfigMapAdapter(client federationclientset.Interface) FederatedTypeAdapter { +func NewConfigMapAdapter(client federationclientset.Interface, config *restclient.Config) FederatedTypeAdapter { return &ConfigMapAdapter{client: client} } @@ -72,9 +72,9 @@ func (a *ConfigMapAdapter) Equivalent(obj1, obj2 pkgruntime.Object) bool { return util.ConfigMapEquivalent(configmap1, configmap2) } -func (a *ConfigMapAdapter) NamespacedName(obj 
pkgruntime.Object) types.NamespacedName { +func (a *ConfigMapAdapter) QualifiedName(obj pkgruntime.Object) QualifiedName { configmap := obj.(*apiv1.ConfigMap) - return types.NamespacedName{Namespace: configmap.Namespace, Name: configmap.Name} + return QualifiedName{Namespace: configmap.Namespace, Name: configmap.Name} } func (a *ConfigMapAdapter) ObjectMeta(obj pkgruntime.Object) *metav1.ObjectMeta { @@ -86,12 +86,12 @@ func (a *ConfigMapAdapter) FedCreate(obj pkgruntime.Object) (pkgruntime.Object, return a.client.CoreV1().ConfigMaps(configmap.Namespace).Create(configmap) } -func (a *ConfigMapAdapter) FedDelete(namespacedName types.NamespacedName, options *metav1.DeleteOptions) error { - return a.client.CoreV1().ConfigMaps(namespacedName.Namespace).Delete(namespacedName.Name, options) +func (a *ConfigMapAdapter) FedDelete(qualifiedName QualifiedName, options *metav1.DeleteOptions) error { + return a.client.CoreV1().ConfigMaps(qualifiedName.Namespace).Delete(qualifiedName.Name, options) } -func (a *ConfigMapAdapter) FedGet(namespacedName types.NamespacedName) (pkgruntime.Object, error) { - return a.client.CoreV1().ConfigMaps(namespacedName.Namespace).Get(namespacedName.Name, metav1.GetOptions{}) +func (a *ConfigMapAdapter) FedGet(qualifiedName QualifiedName) (pkgruntime.Object, error) { + return a.client.CoreV1().ConfigMaps(qualifiedName.Namespace).Get(qualifiedName.Name, metav1.GetOptions{}) } func (a *ConfigMapAdapter) FedList(namespace string, options metav1.ListOptions) (pkgruntime.Object, error) { @@ -112,12 +112,12 @@ func (a *ConfigMapAdapter) ClusterCreate(client kubeclientset.Interface, obj pkg return client.CoreV1().ConfigMaps(configmap.Namespace).Create(configmap) } -func (a *ConfigMapAdapter) ClusterDelete(client kubeclientset.Interface, nsName types.NamespacedName, options *metav1.DeleteOptions) error { - return client.CoreV1().ConfigMaps(nsName.Namespace).Delete(nsName.Name, options) +func (a *ConfigMapAdapter) ClusterDelete(client 
kubeclientset.Interface, qualifiedName QualifiedName, options *metav1.DeleteOptions) error { + return client.CoreV1().ConfigMaps(qualifiedName.Namespace).Delete(qualifiedName.Name, options) } -func (a *ConfigMapAdapter) ClusterGet(client kubeclientset.Interface, namespacedName types.NamespacedName) (pkgruntime.Object, error) { - return client.CoreV1().ConfigMaps(namespacedName.Namespace).Get(namespacedName.Name, metav1.GetOptions{}) +func (a *ConfigMapAdapter) ClusterGet(client kubeclientset.Interface, qualifiedName QualifiedName) (pkgruntime.Object, error) { + return client.CoreV1().ConfigMaps(qualifiedName.Namespace).Get(qualifiedName.Name, metav1.GetOptions{}) } func (a *ConfigMapAdapter) ClusterList(client kubeclientset.Interface, namespace string, options metav1.ListOptions) (pkgruntime.Object, error) { diff --git a/federation/pkg/federatedtypes/crudtester/crudtester.go b/federation/pkg/federatedtypes/crudtester/crudtester.go index 45a7c5ea6f3..f11768054c8 100644 --- a/federation/pkg/federatedtypes/crudtester/crudtester.go +++ b/federation/pkg/federatedtypes/crudtester/crudtester.go @@ -17,6 +17,7 @@ limitations under the License. 
package crudtester import ( + "fmt" "time" "k8s.io/apimachinery/pkg/api/errors" @@ -76,15 +77,20 @@ func (c *FederatedTypeCRUDTester) CheckLifecycle(desiredObject pkgruntime.Object func (c *FederatedTypeCRUDTester) Create(desiredObject pkgruntime.Object) pkgruntime.Object { namespace := c.adapter.ObjectMeta(desiredObject).Namespace - c.tl.Logf("Creating new federated %s in namespace %q", c.kind, namespace) + resourceMsg := fmt.Sprintf("federated %s", c.kind) + if len(namespace) > 0 { + resourceMsg = fmt.Sprintf("%s in namespace %q", resourceMsg, namespace) + } + + c.tl.Logf("Creating new %s", resourceMsg) obj, err := c.adapter.FedCreate(desiredObject) if err != nil { - c.tl.Fatalf("Error creating federated %s in namespace %q : %v", c.kind, namespace, err) + c.tl.Fatalf("Error creating %s: %v", resourceMsg, err) } - namespacedName := c.adapter.NamespacedName(obj) - c.tl.Logf("Created new federated %s %q", c.kind, namespacedName) + qualifiedName := c.adapter.QualifiedName(obj) + c.tl.Logf("Created new federated %s %q", c.kind, qualifiedName) return obj } @@ -98,7 +104,7 @@ func (c *FederatedTypeCRUDTester) CheckCreate(desiredObject pkgruntime.Object) p } func (c *FederatedTypeCRUDTester) CheckUpdate(obj pkgruntime.Object) { - namespacedName := c.adapter.NamespacedName(obj) + qualifiedName := c.adapter.QualifiedName(obj) var initialAnnotation string meta := c.adapter.ObjectMeta(obj) @@ -106,29 +112,29 @@ func (c *FederatedTypeCRUDTester) CheckUpdate(obj pkgruntime.Object) { initialAnnotation = meta.Annotations[AnnotationTestFederationCRUDUpdate] } - c.tl.Logf("Updating federated %s %q", c.kind, namespacedName) + c.tl.Logf("Updating federated %s %q", c.kind, qualifiedName) updatedObj, err := c.updateFedObject(obj) if err != nil { - c.tl.Fatalf("Error updating federated %s %q: %v", c.kind, namespacedName, err) + c.tl.Fatalf("Error updating federated %s %q: %v", c.kind, qualifiedName, err) } // updateFedObject is expected to have changed the value of the annotation meta 
= c.adapter.ObjectMeta(updatedObj) updatedAnnotation := meta.Annotations[AnnotationTestFederationCRUDUpdate] if updatedAnnotation == initialAnnotation { - c.tl.Fatalf("Federated %s %q not mutated", c.kind, namespacedName) + c.tl.Fatalf("Federated %s %q not mutated", c.kind, qualifiedName) } c.CheckPropagation(updatedObj) } func (c *FederatedTypeCRUDTester) CheckDelete(obj pkgruntime.Object, orphanDependents *bool) { - namespacedName := c.adapter.NamespacedName(obj) + qualifiedName := c.adapter.QualifiedName(obj) - c.tl.Logf("Deleting federated %s %q", c.kind, namespacedName) - err := c.adapter.FedDelete(namespacedName, &metav1.DeleteOptions{OrphanDependents: orphanDependents}) + c.tl.Logf("Deleting federated %s %q", c.kind, qualifiedName) + err := c.adapter.FedDelete(qualifiedName, &metav1.DeleteOptions{OrphanDependents: orphanDependents}) if err != nil { - c.tl.Fatalf("Error deleting federated %s %q: %v", c.kind, namespacedName, err) + c.tl.Fatalf("Error deleting federated %s %q: %v", c.kind, qualifiedName, err) } deletingInCluster := (orphanDependents != nil && *orphanDependents == false) @@ -142,14 +148,14 @@ func (c *FederatedTypeCRUDTester) CheckDelete(obj pkgruntime.Object, orphanDepen // Wait for deletion. The federation resource will only be removed once orphan deletion has been // completed or deemed unnecessary. 
err = wait.PollImmediate(c.waitInterval, waitTimeout, func() (bool, error) { - _, err := c.adapter.FedGet(namespacedName) + _, err := c.adapter.FedGet(qualifiedName) if errors.IsNotFound(err) { return true, nil } return false, err }) if err != nil { - c.tl.Fatalf("Error deleting federated %s %q: %v", c.kind, namespacedName, err) + c.tl.Fatalf("Error deleting federated %s %q: %v", c.kind, qualifiedName, err) } var stateMsg string = "present" @@ -157,14 +163,14 @@ func (c *FederatedTypeCRUDTester) CheckDelete(obj pkgruntime.Object, orphanDepen stateMsg = "not present" } for _, client := range c.clusterClients { - _, err := c.adapter.ClusterGet(client, namespacedName) + _, err := c.adapter.ClusterGet(client, qualifiedName) switch { case !deletingInCluster && errors.IsNotFound(err): - c.tl.Fatalf("Federated %s %q was unexpectedly deleted from a member cluster", c.kind, namespacedName) + c.tl.Fatalf("Federated %s %q was unexpectedly deleted from a member cluster", c.kind, qualifiedName) case deletingInCluster && err == nil: - c.tl.Fatalf("Federated %s %q was unexpectedly orphaned in a member cluster", c.kind, namespacedName) + c.tl.Fatalf("Federated %s %q was unexpectedly orphaned in a member cluster", c.kind, qualifiedName) case err != nil && !errors.IsNotFound(err): - c.tl.Fatalf("Error while checking whether %s %q is %s in member clusters: %v", c.kind, namespacedName, stateMsg, err) + c.tl.Fatalf("Error while checking whether %s %q is %s in member clusters: %v", c.kind, qualifiedName, stateMsg, err) } } } @@ -176,26 +182,26 @@ func (c *FederatedTypeCRUDTester) CheckPropagation(obj pkgruntime.Object) { // CheckPropagationForClients checks propagation for the provided clients func (c *FederatedTypeCRUDTester) CheckPropagationForClients(obj pkgruntime.Object, clusterClients []clientset.Interface, objExpected bool) { - namespacedName := c.adapter.NamespacedName(obj) + qualifiedName := c.adapter.QualifiedName(obj) - c.tl.Logf("Waiting for %s %q in %d clusters", c.kind, 
namespacedName, len(clusterClients)) + c.tl.Logf("Waiting for %s %q in %d clusters", c.kind, qualifiedName, len(clusterClients)) for _, client := range clusterClients { err := c.waitForResource(client, obj) switch { case err == wait.ErrWaitTimeout: if objExpected { - c.tl.Fatalf("Timeout verifying %s %q in a member cluster: %v", c.kind, namespacedName, err) + c.tl.Fatalf("Timeout verifying %s %q in a member cluster: %v", c.kind, qualifiedName, err) } case err != nil: - c.tl.Fatalf("Failed to verify %s %q in a member cluster: %v", c.kind, namespacedName, err) + c.tl.Fatalf("Failed to verify %s %q in a member cluster: %v", c.kind, qualifiedName, err) case err == nil && !objExpected: - c.tl.Fatalf("Found unexpected object %s %q in a member cluster: %v", c.kind, namespacedName, err) + c.tl.Fatalf("Found unexpected object %s %q in a member cluster: %v", c.kind, qualifiedName, err) } } } func (c *FederatedTypeCRUDTester) waitForResource(client clientset.Interface, obj pkgruntime.Object) error { - namespacedName := c.adapter.NamespacedName(obj) + qualifiedName := c.adapter.QualifiedName(obj) err := wait.PollImmediate(c.waitInterval, c.clusterWaitTimeout, func() (bool, error) { equivalenceFunc := c.adapter.Equivalent if c.adapter.IsSchedulingAdapter() { @@ -206,7 +212,7 @@ func (c *FederatedTypeCRUDTester) waitForResource(client clientset.Interface, ob equivalenceFunc = schedulingAdapter.EquivalentIgnoringSchedule } - clusterObj, err := c.adapter.ClusterGet(client, namespacedName) + clusterObj, err := c.adapter.ClusterGet(client, qualifiedName) if err == nil && equivalenceFunc(clusterObj, obj) { return true, nil } @@ -227,8 +233,8 @@ func (c *FederatedTypeCRUDTester) updateFedObject(obj pkgruntime.Object) (pkgrun if errors.IsConflict(err) { // The resource was updated by the federation controller. // Get the latest version and retry. 
- namespacedName := c.adapter.NamespacedName(obj) - obj, err = c.adapter.FedGet(namespacedName) + qualifiedName := c.adapter.QualifiedName(obj) + obj, err = c.adapter.FedGet(qualifiedName) return false, err } // Be tolerant of a slow server diff --git a/federation/pkg/federatedtypes/daemonset.go b/federation/pkg/federatedtypes/daemonset.go index 3094afd2994..6216141f623 100644 --- a/federation/pkg/federatedtypes/daemonset.go +++ b/federation/pkg/federatedtypes/daemonset.go @@ -24,8 +24,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" "k8s.io/kubernetes/federation/pkg/federation-controller/util" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -44,7 +44,7 @@ type DaemonSetAdapter struct { client federationclientset.Interface } -func NewDaemonSetAdapter(client federationclientset.Interface) FederatedTypeAdapter { +func NewDaemonSetAdapter(client federationclientset.Interface, config *restclient.Config) FederatedTypeAdapter { return &DaemonSetAdapter{client: client} } @@ -75,9 +75,9 @@ func (a *DaemonSetAdapter) Equivalent(obj1, obj2 pkgruntime.Object) bool { return util.ObjectMetaEquivalent(daemonset1.ObjectMeta, daemonset2.ObjectMeta) && reflect.DeepEqual(daemonset1.Spec, daemonset2.Spec) } -func (a *DaemonSetAdapter) NamespacedName(obj pkgruntime.Object) types.NamespacedName { +func (a *DaemonSetAdapter) QualifiedName(obj pkgruntime.Object) QualifiedName { daemonset := obj.(*extensionsv1.DaemonSet) - return types.NamespacedName{Namespace: daemonset.Namespace, Name: daemonset.Name} + return QualifiedName{Namespace: daemonset.Namespace, Name: daemonset.Name} } func (a *DaemonSetAdapter) ObjectMeta(obj pkgruntime.Object) *metav1.ObjectMeta { @@ 
-89,12 +89,12 @@ func (a *DaemonSetAdapter) FedCreate(obj pkgruntime.Object) (pkgruntime.Object, return a.client.Extensions().DaemonSets(daemonset.Namespace).Create(daemonset) } -func (a *DaemonSetAdapter) FedDelete(namespacedName types.NamespacedName, options *metav1.DeleteOptions) error { - return a.client.Extensions().DaemonSets(namespacedName.Namespace).Delete(namespacedName.Name, options) +func (a *DaemonSetAdapter) FedDelete(qualifiedName QualifiedName, options *metav1.DeleteOptions) error { + return a.client.Extensions().DaemonSets(qualifiedName.Namespace).Delete(qualifiedName.Name, options) } -func (a *DaemonSetAdapter) FedGet(namespacedName types.NamespacedName) (pkgruntime.Object, error) { - return a.client.Extensions().DaemonSets(namespacedName.Namespace).Get(namespacedName.Name, metav1.GetOptions{}) +func (a *DaemonSetAdapter) FedGet(qualifiedName QualifiedName) (pkgruntime.Object, error) { + return a.client.Extensions().DaemonSets(qualifiedName.Namespace).Get(qualifiedName.Name, metav1.GetOptions{}) } func (a *DaemonSetAdapter) FedList(namespace string, options metav1.ListOptions) (pkgruntime.Object, error) { @@ -115,12 +115,12 @@ func (a *DaemonSetAdapter) ClusterCreate(client kubeclientset.Interface, obj pkg return client.Extensions().DaemonSets(daemonset.Namespace).Create(daemonset) } -func (a *DaemonSetAdapter) ClusterDelete(client kubeclientset.Interface, nsName types.NamespacedName, options *metav1.DeleteOptions) error { - return client.Extensions().DaemonSets(nsName.Namespace).Delete(nsName.Name, options) +func (a *DaemonSetAdapter) ClusterDelete(client kubeclientset.Interface, qualifiedName QualifiedName, options *metav1.DeleteOptions) error { + return client.Extensions().DaemonSets(qualifiedName.Namespace).Delete(qualifiedName.Name, options) } -func (a *DaemonSetAdapter) ClusterGet(client kubeclientset.Interface, namespacedName types.NamespacedName) (pkgruntime.Object, error) { - return 
client.Extensions().DaemonSets(namespacedName.Namespace).Get(namespacedName.Name, metav1.GetOptions{}) +func (a *DaemonSetAdapter) ClusterGet(client kubeclientset.Interface, qualifiedName QualifiedName) (pkgruntime.Object, error) { + return client.Extensions().DaemonSets(qualifiedName.Namespace).Get(qualifiedName.Name, metav1.GetOptions{}) } func (a *DaemonSetAdapter) ClusterList(client kubeclientset.Interface, namespace string, options metav1.ListOptions) (pkgruntime.Object, error) { diff --git a/federation/pkg/federatedtypes/deployment.go b/federation/pkg/federatedtypes/deployment.go new file mode 100644 index 00000000000..cea19239210 --- /dev/null +++ b/federation/pkg/federatedtypes/deployment.go @@ -0,0 +1,188 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package federatedtypes + +import ( + apiv1 "k8s.io/api/core/v1" + extensionsv1 "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkgruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" + federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" + fedutil "k8s.io/kubernetes/federation/pkg/federation-controller/util" + kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" +) + +const ( + DeploymentKind = "deployment" + DeploymentControllerName = "deployments" + FedDeploymentPreferencesAnnotation = "federation.kubernetes.io/deployment-preferences" +) + +func init() { + RegisterFederatedType(DeploymentKind, DeploymentControllerName, []schema.GroupVersionResource{extensionsv1.SchemeGroupVersion.WithResource(DeploymentControllerName)}, NewDeploymentAdapter) +} + +type DeploymentAdapter struct { + *schedulingAdapter + client federationclientset.Interface +} + +func NewDeploymentAdapter(client federationclientset.Interface, config *restclient.Config) FederatedTypeAdapter { + schedulingAdapter := schedulingAdapter{ + preferencesAnnotationName: FedDeploymentPreferencesAnnotation, + updateStatusFunc: func(obj pkgruntime.Object, status SchedulingStatus) error { + deployment := obj.(*extensionsv1.Deployment) + if status.Replicas != deployment.Status.Replicas || status.UpdatedReplicas != deployment.Status.UpdatedReplicas || + status.ReadyReplicas != deployment.Status.ReadyReplicas || status.AvailableReplicas != deployment.Status.AvailableReplicas { + deployment.Status = extensionsv1.DeploymentStatus{ + Replicas: status.Replicas, + UpdatedReplicas: status.UpdatedReplicas, + ReadyReplicas: status.ReadyReplicas, + AvailableReplicas: status.AvailableReplicas, + } + _, err := client.Extensions().Deployments(deployment.Namespace).UpdateStatus(deployment) + return err + } + return nil 
+ }, + } + + return &DeploymentAdapter{&schedulingAdapter, client} +} + +func (a *DeploymentAdapter) Kind() string { + return DeploymentKind +} + +func (a *DeploymentAdapter) ObjectType() pkgruntime.Object { + return &extensionsv1.Deployment{} +} + +func (a *DeploymentAdapter) IsExpectedType(obj interface{}) bool { + _, ok := obj.(*extensionsv1.Deployment) + return ok +} + +func (a *DeploymentAdapter) Copy(obj pkgruntime.Object) pkgruntime.Object { + deployment := obj.(*extensionsv1.Deployment) + return fedutil.DeepCopyDeployment(deployment) +} + +func (a *DeploymentAdapter) Equivalent(obj1, obj2 pkgruntime.Object) bool { + deployment1 := obj1.(*extensionsv1.Deployment) + deployment2 := obj2.(*extensionsv1.Deployment) + return fedutil.DeploymentEquivalent(deployment1, deployment2) +} + +func (a *DeploymentAdapter) QualifiedName(obj pkgruntime.Object) QualifiedName { + deployment := obj.(*extensionsv1.Deployment) + return QualifiedName{Namespace: deployment.Namespace, Name: deployment.Name} +} + +func (a *DeploymentAdapter) ObjectMeta(obj pkgruntime.Object) *metav1.ObjectMeta { + return &obj.(*extensionsv1.Deployment).ObjectMeta +} + +func (a *DeploymentAdapter) FedCreate(obj pkgruntime.Object) (pkgruntime.Object, error) { + deployment := obj.(*extensionsv1.Deployment) + return a.client.Extensions().Deployments(deployment.Namespace).Create(deployment) +} + +func (a *DeploymentAdapter) FedDelete(qualifiedName QualifiedName, options *metav1.DeleteOptions) error { + return a.client.Extensions().Deployments(qualifiedName.Namespace).Delete(qualifiedName.Name, options) +} + +func (a *DeploymentAdapter) FedGet(qualifiedName QualifiedName) (pkgruntime.Object, error) { + return a.client.Extensions().Deployments(qualifiedName.Namespace).Get(qualifiedName.Name, metav1.GetOptions{}) +} + +func (a *DeploymentAdapter) FedList(namespace string, options metav1.ListOptions) (pkgruntime.Object, error) { + return a.client.Extensions().Deployments(namespace).List(options) +} + +func (a 
*DeploymentAdapter) FedUpdate(obj pkgruntime.Object) (pkgruntime.Object, error) { + deployment := obj.(*extensionsv1.Deployment) + return a.client.Extensions().Deployments(deployment.Namespace).Update(deployment) +} + +func (a *DeploymentAdapter) FedWatch(namespace string, options metav1.ListOptions) (watch.Interface, error) { + return a.client.Extensions().Deployments(namespace).Watch(options) +} + +func (a *DeploymentAdapter) ClusterCreate(client kubeclientset.Interface, obj pkgruntime.Object) (pkgruntime.Object, error) { + deployment := obj.(*extensionsv1.Deployment) + return client.Extensions().Deployments(deployment.Namespace).Create(deployment) +} + +func (a *DeploymentAdapter) ClusterDelete(client kubeclientset.Interface, qualifiedName QualifiedName, options *metav1.DeleteOptions) error { + return client.Extensions().Deployments(qualifiedName.Namespace).Delete(qualifiedName.Name, options) +} + +func (a *DeploymentAdapter) ClusterGet(client kubeclientset.Interface, qualifiedName QualifiedName) (pkgruntime.Object, error) { + return client.Extensions().Deployments(qualifiedName.Namespace).Get(qualifiedName.Name, metav1.GetOptions{}) +} + +func (a *DeploymentAdapter) ClusterList(client kubeclientset.Interface, namespace string, options metav1.ListOptions) (pkgruntime.Object, error) { + return client.Extensions().Deployments(namespace).List(options) +} + +func (a *DeploymentAdapter) ClusterUpdate(client kubeclientset.Interface, obj pkgruntime.Object) (pkgruntime.Object, error) { + deployment := obj.(*extensionsv1.Deployment) + return client.Extensions().Deployments(deployment.Namespace).Update(deployment) +} + +func (a *DeploymentAdapter) ClusterWatch(client kubeclientset.Interface, namespace string, options metav1.ListOptions) (watch.Interface, error) { + return client.Extensions().Deployments(namespace).Watch(options) +} + +func (a *DeploymentAdapter) EquivalentIgnoringSchedule(obj1, obj2 pkgruntime.Object) bool { + deployment1 := 
obj1.(*extensionsv1.Deployment) + deployment2 := a.Copy(obj2).(*extensionsv1.Deployment) + deployment2.Spec.Replicas = deployment1.Spec.Replicas + return fedutil.DeploymentEquivalent(deployment1, deployment2) +} + +func (a *DeploymentAdapter) NewTestObject(namespace string) pkgruntime.Object { + replicas := int32(3) + zero := int64(0) + return &extensionsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-deployment-", + Namespace: namespace, + }, + Spec: extensionsv1.DeploymentSpec{ + Replicas: &replicas, + Template: apiv1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"foo": "bar"}, + }, + Spec: apiv1.PodSpec{ + TerminationGracePeriodSeconds: &zero, + Containers: []apiv1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + }, + }, + } +} diff --git a/federation/pkg/federatedtypes/namespace.go b/federation/pkg/federatedtypes/namespace.go new file mode 100644 index 00000000000..43eadfac145 --- /dev/null +++ b/federation/pkg/federatedtypes/namespace.go @@ -0,0 +1,215 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package federatedtypes + +import ( + "fmt" + + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkgruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" + "k8s.io/kubernetes/federation/pkg/federation-controller/util" + "k8s.io/kubernetes/pkg/api" + kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + "k8s.io/kubernetes/pkg/controller/namespace/deletion" + + "github.com/golang/glog" +) + +const ( + NamespaceKind = "namespace" + NamespaceControllerName = "namespaces" +) + +func init() { + RegisterFederatedType(NamespaceKind, NamespaceControllerName, []schema.GroupVersionResource{apiv1.SchemeGroupVersion.WithResource(NamespaceControllerName)}, NewNamespaceAdapter) +} + +type NamespaceAdapter struct { + client federationclientset.Interface + deleter deletion.NamespacedResourcesDeleterInterface +} + +func NewNamespaceAdapter(client federationclientset.Interface, config *restclient.Config) FederatedTypeAdapter { + dynamicClientPool := dynamic.NewDynamicClientPool(config) + discoverResourcesFunc := client.Discovery().ServerPreferredNamespacedResources + deleter := deletion.NewNamespacedResourcesDeleter( + client.Core().Namespaces(), + dynamicClientPool, + nil, + discoverResourcesFunc, + apiv1.FinalizerKubernetes, + false) + return &NamespaceAdapter{client: client, deleter: deleter} +} + +func (a *NamespaceAdapter) Kind() string { + return NamespaceKind +} + +func (a *NamespaceAdapter) ObjectType() pkgruntime.Object { + return &apiv1.Namespace{} +} + +func (a *NamespaceAdapter) IsExpectedType(obj interface{}) bool { + _, ok := obj.(*apiv1.Namespace) + return ok +} + +func (a *NamespaceAdapter) Copy(obj pkgruntime.Object) pkgruntime.Object { + namespace 
:= obj.(*apiv1.Namespace) + return &apiv1.Namespace{ + ObjectMeta: util.DeepCopyRelevantObjectMeta(namespace.ObjectMeta), + Spec: *(util.DeepCopyApiTypeOrPanic(&namespace.Spec).(*apiv1.NamespaceSpec)), + } +} + +func (a *NamespaceAdapter) Equivalent(obj1, obj2 pkgruntime.Object) bool { + return util.ObjectMetaAndSpecEquivalent(obj1, obj2) +} + +func (a *NamespaceAdapter) QualifiedName(obj pkgruntime.Object) QualifiedName { + namespace := obj.(*apiv1.Namespace) + return QualifiedName{Name: namespace.Name} +} + +func (a *NamespaceAdapter) ObjectMeta(obj pkgruntime.Object) *metav1.ObjectMeta { + return &obj.(*apiv1.Namespace).ObjectMeta +} + +func (a *NamespaceAdapter) FedCreate(obj pkgruntime.Object) (pkgruntime.Object, error) { + namespace := obj.(*apiv1.Namespace) + return a.client.CoreV1().Namespaces().Create(namespace) +} + +func (a *NamespaceAdapter) FedDelete(qualifiedName QualifiedName, options *metav1.DeleteOptions) error { + return a.client.CoreV1().Namespaces().Delete(qualifiedName.Name, options) +} + +func (a *NamespaceAdapter) FedGet(qualifiedName QualifiedName) (pkgruntime.Object, error) { + return a.client.CoreV1().Namespaces().Get(qualifiedName.Name, metav1.GetOptions{}) +} + +func (a *NamespaceAdapter) FedList(namespace string, options metav1.ListOptions) (pkgruntime.Object, error) { + return a.client.CoreV1().Namespaces().List(options) +} + +func (a *NamespaceAdapter) FedUpdate(obj pkgruntime.Object) (pkgruntime.Object, error) { + namespace := obj.(*apiv1.Namespace) + return a.client.CoreV1().Namespaces().Update(namespace) +} + +func (a *NamespaceAdapter) FedWatch(namespace string, options metav1.ListOptions) (watch.Interface, error) { + return a.client.CoreV1().Namespaces().Watch(options) +} + +func (a *NamespaceAdapter) ClusterCreate(client kubeclientset.Interface, obj pkgruntime.Object) (pkgruntime.Object, error) { + namespace := obj.(*apiv1.Namespace) + return client.CoreV1().Namespaces().Create(namespace) +} + +func (a *NamespaceAdapter) 
ClusterDelete(client kubeclientset.Interface, qualifiedName QualifiedName, options *metav1.DeleteOptions) error { + return client.CoreV1().Namespaces().Delete(qualifiedName.Name, options) +} + +func (a *NamespaceAdapter) ClusterGet(client kubeclientset.Interface, qualifiedName QualifiedName) (pkgruntime.Object, error) { + return client.CoreV1().Namespaces().Get(qualifiedName.Name, metav1.GetOptions{}) +} + +func (a *NamespaceAdapter) ClusterList(client kubeclientset.Interface, namespace string, options metav1.ListOptions) (pkgruntime.Object, error) { + return client.CoreV1().Namespaces().List(options) +} + +func (a *NamespaceAdapter) ClusterUpdate(client kubeclientset.Interface, obj pkgruntime.Object) (pkgruntime.Object, error) { + namespace := obj.(*apiv1.Namespace) + return client.CoreV1().Namespaces().Update(namespace) +} + +func (a *NamespaceAdapter) ClusterWatch(client kubeclientset.Interface, namespace string, options metav1.ListOptions) (watch.Interface, error) { + return client.CoreV1().Namespaces().Watch(options) +} + +func (a *NamespaceAdapter) IsSchedulingAdapter() bool { + return false +} + +func (a *NamespaceAdapter) NewTestObject(namespace string) pkgruntime.Object { + return &apiv1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-namespace-", + }, + Spec: apiv1.NamespaceSpec{ + Finalizers: []apiv1.FinalizerName{apiv1.FinalizerKubernetes}, + }, + } +} + +// CleanUpNamespace deletes all resources in a given namespace. +func (a *NamespaceAdapter) CleanUpNamespace(obj pkgruntime.Object, eventRecorder record.EventRecorder) (pkgruntime.Object, error) { + namespace := obj.(*apiv1.Namespace) + name := namespace.Name + + // Set Terminating status. 
+ updatedNamespace := &apiv1.Namespace{ + ObjectMeta: namespace.ObjectMeta, + Spec: namespace.Spec, + Status: apiv1.NamespaceStatus{ + Phase: apiv1.NamespaceTerminating, + }, + } + var err error + if namespace.Status.Phase != apiv1.NamespaceTerminating { + glog.V(2).Infof("Marking ns %s as terminating", name) + eventRecorder.Event(namespace, api.EventTypeNormal, "DeleteNamespace", fmt.Sprintf("Marking for deletion")) + _, err = a.FedUpdate(updatedNamespace) + if err != nil { + return nil, fmt.Errorf("failed to update namespace: %v", err) + } + } + + if hasFinalizerInSpec(updatedNamespace, apiv1.FinalizerKubernetes) { + // Delete resources in this namespace. + err = a.deleter.Delete(name) + if err != nil { + return nil, fmt.Errorf("error in deleting resources in namespace %s: %v", name, err) + } + glog.V(2).Infof("Removed kubernetes finalizer from ns %s", name) + // Fetch the updated Namespace. + obj, err = a.FedGet(QualifiedName{Name: name}) + updatedNamespace = obj.(*apiv1.Namespace) + if err != nil { + return nil, fmt.Errorf("error in fetching updated namespace %s: %s", name, err) + } + } + + return updatedNamespace, nil +} + +func hasFinalizerInSpec(namespace *apiv1.Namespace, finalizer apiv1.FinalizerName) bool { + for i := range namespace.Spec.Finalizers { + if namespace.Spec.Finalizers[i] == finalizer { + return true + } + } + return false +} diff --git a/federation/pkg/federatedtypes/qualifiedname.go b/federation/pkg/federatedtypes/qualifiedname.go new file mode 100644 index 00000000000..95f0df11042 --- /dev/null +++ b/federation/pkg/federatedtypes/qualifiedname.go @@ -0,0 +1,41 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federatedtypes + +import ( + "fmt" +) + +// QualifiedName comprises a resource name with an optional namespace. +// If namespace is provided, a QualifiedName will be rendered as +// "<namespace>/<name>". If not, it will be rendered as "name". This +// is intended to allow the FederatedTypeAdapter interface and its +// consumers to operate on both namespaces and namespace-qualified +// resources. + +type QualifiedName struct { + Namespace string + Name string +} + +// String returns the general purpose string representation +func (n QualifiedName) String() string { + if len(n.Namespace) == 0 { + return n.Name + } + return fmt.Sprintf("%s/%s", n.Namespace, n.Name) +} diff --git a/federation/pkg/federatedtypes/replicaset.go b/federation/pkg/federatedtypes/replicaset.go index 85f18597a78..6f0b3a909ab 100644 --- a/federation/pkg/federatedtypes/replicaset.go +++ b/federation/pkg/federatedtypes/replicaset.go @@ -17,26 +17,15 @@ limitations under the License. 
package federatedtypes import ( - "bytes" - "fmt" - "sort" - "time" - - "github.com/golang/glog" apiv1 "k8s.io/api/core/v1" extensionsv1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" - fedapi "k8s.io/kubernetes/federation/apis/federation" - fedv1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1" + restclient "k8s.io/client-go/rest" federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" fedutil "k8s.io/kubernetes/federation/pkg/federation-controller/util" - "k8s.io/kubernetes/federation/pkg/federation-controller/util/planner" - "k8s.io/kubernetes/federation/pkg/federation-controller/util/podanalyzer" - "k8s.io/kubernetes/federation/pkg/federation-controller/util/replicapreferences" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" ) @@ -46,28 +35,35 @@ const ( FedReplicaSetPreferencesAnnotation = "federation.kubernetes.io/replica-set-preferences" ) -type replicaSetUserInfo struct { - scheduleResult (map[string]int64) - fedStatus *extensionsv1.ReplicaSetStatus -} - func init() { RegisterFederatedType(ReplicaSetKind, ReplicaSetControllerName, []schema.GroupVersionResource{extensionsv1.SchemeGroupVersion.WithResource(ReplicaSetControllerName)}, NewReplicaSetAdapter) } type ReplicaSetAdapter struct { - client federationclientset.Interface - defaultPlanner *planner.Planner + *schedulingAdapter + client federationclientset.Interface } -func NewReplicaSetAdapter(client federationclientset.Interface) FederatedTypeAdapter { - return &ReplicaSetAdapter{ - client: client, - defaultPlanner: planner.NewPlanner(&fedapi.ReplicaAllocationPreferences{ - Clusters: map[string]fedapi.ClusterPreferences{ - "*": {Weight: 1}, - }, - })} +func NewReplicaSetAdapter(client federationclientset.Interface, config *restclient.Config) 
FederatedTypeAdapter { + schedulingAdapter := schedulingAdapter{ + preferencesAnnotationName: FedReplicaSetPreferencesAnnotation, + updateStatusFunc: func(obj pkgruntime.Object, status SchedulingStatus) error { + rs := obj.(*extensionsv1.ReplicaSet) + if status.Replicas != rs.Status.Replicas || status.FullyLabeledReplicas != rs.Status.FullyLabeledReplicas || + status.ReadyReplicas != rs.Status.ReadyReplicas || status.AvailableReplicas != rs.Status.AvailableReplicas { + rs.Status = extensionsv1.ReplicaSetStatus{ + Replicas: status.Replicas, + FullyLabeledReplicas: status.Replicas, + ReadyReplicas: status.ReadyReplicas, + AvailableReplicas: status.AvailableReplicas, + } + _, err := client.Extensions().ReplicaSets(rs.Namespace).UpdateStatus(rs) + return err + } + return nil + }, + } + return &ReplicaSetAdapter{&schedulingAdapter, client} } func (a *ReplicaSetAdapter) Kind() string { @@ -92,14 +88,12 @@ func (a *ReplicaSetAdapter) Copy(obj pkgruntime.Object) pkgruntime.Object { } func (a *ReplicaSetAdapter) Equivalent(obj1, obj2 pkgruntime.Object) bool { - replicaset1 := obj1.(*extensionsv1.ReplicaSet) - replicaset2 := obj2.(*extensionsv1.ReplicaSet) - return fedutil.ObjectMetaAndSpecEquivalent(replicaset1, replicaset2) + return fedutil.ObjectMetaAndSpecEquivalent(obj1, obj2) } -func (a *ReplicaSetAdapter) NamespacedName(obj pkgruntime.Object) types.NamespacedName { +func (a *ReplicaSetAdapter) QualifiedName(obj pkgruntime.Object) QualifiedName { replicaset := obj.(*extensionsv1.ReplicaSet) - return types.NamespacedName{Namespace: replicaset.Namespace, Name: replicaset.Name} + return QualifiedName{Namespace: replicaset.Namespace, Name: replicaset.Name} } func (a *ReplicaSetAdapter) ObjectMeta(obj pkgruntime.Object) *metav1.ObjectMeta { @@ -111,12 +105,12 @@ func (a *ReplicaSetAdapter) FedCreate(obj pkgruntime.Object) (pkgruntime.Object, return a.client.Extensions().ReplicaSets(replicaset.Namespace).Create(replicaset) } -func (a *ReplicaSetAdapter) 
FedDelete(namespacedName types.NamespacedName, options *metav1.DeleteOptions) error { - return a.client.Extensions().ReplicaSets(namespacedName.Namespace).Delete(namespacedName.Name, options) +func (a *ReplicaSetAdapter) FedDelete(qualifiedName QualifiedName, options *metav1.DeleteOptions) error { + return a.client.Extensions().ReplicaSets(qualifiedName.Namespace).Delete(qualifiedName.Name, options) } -func (a *ReplicaSetAdapter) FedGet(namespacedName types.NamespacedName) (pkgruntime.Object, error) { - return a.client.Extensions().ReplicaSets(namespacedName.Namespace).Get(namespacedName.Name, metav1.GetOptions{}) +func (a *ReplicaSetAdapter) FedGet(qualifiedName QualifiedName) (pkgruntime.Object, error) { + return a.client.Extensions().ReplicaSets(qualifiedName.Namespace).Get(qualifiedName.Name, metav1.GetOptions{}) } func (a *ReplicaSetAdapter) FedList(namespace string, options metav1.ListOptions) (pkgruntime.Object, error) { @@ -137,12 +131,12 @@ func (a *ReplicaSetAdapter) ClusterCreate(client kubeclientset.Interface, obj pk return client.Extensions().ReplicaSets(replicaset.Namespace).Create(replicaset) } -func (a *ReplicaSetAdapter) ClusterDelete(client kubeclientset.Interface, nsName types.NamespacedName, options *metav1.DeleteOptions) error { - return client.Extensions().ReplicaSets(nsName.Namespace).Delete(nsName.Name, options) +func (a *ReplicaSetAdapter) ClusterDelete(client kubeclientset.Interface, qualifiedName QualifiedName, options *metav1.DeleteOptions) error { + return client.Extensions().ReplicaSets(qualifiedName.Namespace).Delete(qualifiedName.Name, options) } -func (a *ReplicaSetAdapter) ClusterGet(client kubeclientset.Interface, namespacedName types.NamespacedName) (pkgruntime.Object, error) { - return client.Extensions().ReplicaSets(namespacedName.Namespace).Get(namespacedName.Name, metav1.GetOptions{}) +func (a *ReplicaSetAdapter) ClusterGet(client kubeclientset.Interface, qualifiedName QualifiedName) (pkgruntime.Object, error) { + return 
client.Extensions().ReplicaSets(qualifiedName.Namespace).Get(qualifiedName.Name, metav1.GetOptions{}) } func (a *ReplicaSetAdapter) ClusterList(client kubeclientset.Interface, namespace string, options metav1.ListOptions) (pkgruntime.Object, error) { @@ -158,79 +152,6 @@ func (a *ReplicaSetAdapter) ClusterWatch(client kubeclientset.Interface, namespa return client.Extensions().ReplicaSets(namespace).Watch(options) } -func (a *ReplicaSetAdapter) IsSchedulingAdapter() bool { - return true -} - -func (a *ReplicaSetAdapter) GetSchedule(obj pkgruntime.Object, key string, clusters []*fedv1beta1.Cluster, informer fedutil.FederatedInformer) (*SchedulingInfo, error) { - var clusterNames []string - for _, cluster := range clusters { - clusterNames = append(clusterNames, cluster.Name) - } - - // Schedule the pods across the existing clusters. - replicaSetGetter := func(clusterName, key string) (interface{}, bool, error) { - return informer.GetTargetStore().GetByKey(clusterName, key) - } - podsGetter := func(clusterName string, replicaSet *extensionsv1.ReplicaSet) (*apiv1.PodList, error) { - clientset, err := informer.GetClientsetForCluster(clusterName) - if err != nil { - return nil, err - } - selector, err := metav1.LabelSelectorAsSelector(replicaSet.Spec.Selector) - if err != nil { - return nil, fmt.Errorf("invalid selector: %v", err) - } - return clientset.Core().Pods(replicaSet.ObjectMeta.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) - } - current, estimatedCapacity, err := clustersReplicaState(clusterNames, key, replicaSetGetter, podsGetter) - if err != nil { - return nil, err - } - rs := obj.(*extensionsv1.ReplicaSet) - return &SchedulingInfo{ - Schedule: a.schedule(rs, clusterNames, current, estimatedCapacity), - Status: SchedulingStatus{}, - }, nil -} - -func (a *ReplicaSetAdapter) ScheduleObject(cluster *fedv1beta1.Cluster, clusterObj pkgruntime.Object, federationObjCopy pkgruntime.Object, schedulingInfo *SchedulingInfo) (pkgruntime.Object, 
bool, error) { - rs := federationObjCopy.(*extensionsv1.ReplicaSet) - - replicas, ok := schedulingInfo.Schedule[cluster.Name] - if !ok { - replicas = 0 - } - specReplicas := int32(replicas) - rs.Spec.Replicas = &specReplicas - - if clusterObj != nil { - clusterRs := clusterObj.(*extensionsv1.ReplicaSet) - schedulingInfo.Status.Replicas += clusterRs.Status.Replicas - schedulingInfo.Status.FullyLabeledReplicas += clusterRs.Status.FullyLabeledReplicas - schedulingInfo.Status.ReadyReplicas += clusterRs.Status.ReadyReplicas - schedulingInfo.Status.AvailableReplicas += clusterRs.Status.AvailableReplicas - } - return rs, replicas > 0, nil -} - -func (a *ReplicaSetAdapter) UpdateFederatedStatus(obj pkgruntime.Object, status SchedulingStatus) error { - rs := obj.(*extensionsv1.ReplicaSet) - - if status.Replicas != rs.Status.Replicas || status.FullyLabeledReplicas != rs.Status.FullyLabeledReplicas || - status.ReadyReplicas != rs.Status.ReadyReplicas || status.AvailableReplicas != rs.Status.AvailableReplicas { - rs.Status = extensionsv1.ReplicaSetStatus{ - Replicas: status.Replicas, - FullyLabeledReplicas: status.Replicas, - ReadyReplicas: status.ReadyReplicas, - AvailableReplicas: status.AvailableReplicas, - } - _, err := a.client.Extensions().ReplicaSets(rs.Namespace).UpdateStatus(rs) - return err - } - return nil -} - func (a *ReplicaSetAdapter) EquivalentIgnoringSchedule(obj1, obj2 pkgruntime.Object) bool { replicaset1 := obj1.(*extensionsv1.ReplicaSet) replicaset2 := a.Copy(obj2).(*extensionsv1.ReplicaSet) @@ -238,93 +159,6 @@ func (a *ReplicaSetAdapter) EquivalentIgnoringSchedule(obj1, obj2 pkgruntime.Obj return fedutil.ObjectMetaAndSpecEquivalent(replicaset1, replicaset2) } -func (a *ReplicaSetAdapter) schedule(frs *extensionsv1.ReplicaSet, clusterNames []string, - current map[string]int64, estimatedCapacity map[string]int64) map[string]int64 { - // TODO: integrate real scheduler - - plnr := a.defaultPlanner - frsPref, err := 
replicapreferences.GetAllocationPreferences(frs, FedReplicaSetPreferencesAnnotation) - if err != nil { - glog.Info("Invalid ReplicaSet specific preference, use default. rs: %v, err: %v", frs, err) - } - if frsPref != nil { // create a new planner if user specified a preference - plnr = planner.NewPlanner(frsPref) - } - - replicas := int64(*frs.Spec.Replicas) - scheduleResult, overflow := plnr.Plan(replicas, clusterNames, current, estimatedCapacity, - frs.Namespace+"/"+frs.Name) - // Ensure that the schedule being returned has scheduling instructions for - // all of the clusters that currently have replicas. A cluster that was in - // the previous schedule but is not in the new schedule should have zero - // replicas. - result := make(map[string]int64) - for clusterName := range current { - result[clusterName] = 0 - } - for clusterName, replicas := range scheduleResult { - result[clusterName] = replicas - } - for clusterName, replicas := range overflow { - result[clusterName] += replicas - } - if glog.V(4) { - buf := bytes.NewBufferString(fmt.Sprintf("Schedule - ReplicaSet: %s/%s\n", frs.Namespace, frs.Name)) - sort.Strings(clusterNames) - for _, clusterName := range clusterNames { - cur := current[clusterName] - target := scheduleResult[clusterName] - fmt.Fprintf(buf, "%s: current: %d target: %d", clusterName, cur, target) - if over, found := overflow[clusterName]; found { - fmt.Fprintf(buf, " overflow: %d", over) - } - if capacity, found := estimatedCapacity[clusterName]; found { - fmt.Fprintf(buf, " capacity: %d", capacity) - } - fmt.Fprintf(buf, "\n") - } - glog.V(4).Infof(buf.String()) - } - return result -} - -// clusterReplicaState returns information about the scheduling state of the pods running in the federated clusters. 
-func clustersReplicaState( - clusterNames []string, - replicaSetKey string, - replicaSetGetter func(clusterName string, key string) (interface{}, bool, error), - podsGetter func(clusterName string, replicaSet *extensionsv1.ReplicaSet) (*apiv1.PodList, error)) (current map[string]int64, estimatedCapacity map[string]int64, err error) { - - current = make(map[string]int64) - estimatedCapacity = make(map[string]int64) - - for _, clusterName := range clusterNames { - rsObj, exists, err := replicaSetGetter(clusterName, replicaSetKey) - if err != nil { - return nil, nil, err - } - if !exists { - continue - } - rs := rsObj.(*extensionsv1.ReplicaSet) - if int32(*rs.Spec.Replicas) == rs.Status.ReadyReplicas { - current[clusterName] = int64(rs.Status.ReadyReplicas) - } else { - pods, err := podsGetter(clusterName, rs) - if err != nil { - return nil, nil, err - } - podStatus := podanalyzer.AnalyzePods(pods, time.Now()) - current[clusterName] = int64(podStatus.RunningAndReady) // include pending as well? - unschedulable := int64(podStatus.Unschedulable) - if unschedulable > 0 { - estimatedCapacity[clusterName] = int64(*rs.Spec.Replicas) - unschedulable - } - } - } - return current, estimatedCapacity, nil -} - func (a *ReplicaSetAdapter) NewTestObject(namespace string) pkgruntime.Object { replicas := int32(3) zero := int64(0) diff --git a/federation/pkg/federatedtypes/scheduling.go b/federation/pkg/federatedtypes/scheduling.go index c5d0b93a845..32e96c7779c 100644 --- a/federation/pkg/federatedtypes/scheduling.go +++ b/federation/pkg/federatedtypes/scheduling.go @@ -17,15 +17,31 @@ limitations under the License. 
package federatedtypes import ( + "bytes" + "fmt" + "reflect" + "sort" + "time" + + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" pkgruntime "k8s.io/apimachinery/pkg/runtime" + fedapi "k8s.io/kubernetes/federation/apis/federation" federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1" fedutil "k8s.io/kubernetes/federation/pkg/federation-controller/util" + "k8s.io/kubernetes/federation/pkg/federation-controller/util/planner" + "k8s.io/kubernetes/federation/pkg/federation-controller/util/podanalyzer" + "k8s.io/kubernetes/federation/pkg/federation-controller/util/replicapreferences" + + "github.com/golang/glog" ) // SchedulingStatus contains the status of the objects that are being // scheduled into joined clusters. type SchedulingStatus struct { Replicas int32 + UpdatedReplicas int32 FullyLabeledReplicas int32 ReadyReplicas int32 AvailableReplicas int32 @@ -49,3 +65,171 @@ type SchedulingAdapter interface { // equivalent ignoring differences due to scheduling. EquivalentIgnoringSchedule(obj1, obj2 pkgruntime.Object) bool } + +// schedulingAdapter is meant to be embedded in other type adapters that require +// workload scheduling. +type schedulingAdapter struct { + preferencesAnnotationName string + updateStatusFunc func(pkgruntime.Object, SchedulingStatus) error +} + +func (a *schedulingAdapter) IsSchedulingAdapter() bool { + return true +} + +func (a *schedulingAdapter) GetSchedule(obj pkgruntime.Object, key string, clusters []*federationapi.Cluster, informer fedutil.FederatedInformer) (*SchedulingInfo, error) { + var clusterNames []string + for _, cluster := range clusters { + clusterNames = append(clusterNames, cluster.Name) + } + + // Schedule the pods across the existing clusters. 
+ objectGetter := func(clusterName, key string) (interface{}, bool, error) { + return informer.GetTargetStore().GetByKey(clusterName, key) + } + podsGetter := func(clusterName string, obj pkgruntime.Object) (*apiv1.PodList, error) { + clientset, err := informer.GetClientsetForCluster(clusterName) + if err != nil { + return nil, err + } + selectorObj := reflect.ValueOf(obj).Elem().FieldByName("Spec").FieldByName("Selector").Interface().(*metav1.LabelSelector) + selector, err := metav1.LabelSelectorAsSelector(selectorObj) + if err != nil { + return nil, fmt.Errorf("invalid selector: %v", err) + } + metadata, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + return clientset.Core().Pods(metadata.GetNamespace()).List(metav1.ListOptions{LabelSelector: selector.String()}) + } + currentReplicasPerCluster, estimatedCapacity, err := clustersReplicaState(clusterNames, key, objectGetter, podsGetter) + if err != nil { + return nil, err + } + + fedPref, err := replicapreferences.GetAllocationPreferences(obj, a.preferencesAnnotationName) + if err != nil { + glog.Infof("Invalid workload-type specific preference, using default. 
object: %v, err: %v", obj, err) + } + if fedPref == nil { + fedPref = &fedapi.ReplicaAllocationPreferences{ + Clusters: map[string]fedapi.ClusterPreferences{ + "*": {Weight: 1}, + }, + } + } + + plnr := planner.NewPlanner(fedPref) + + return &SchedulingInfo{ + Schedule: schedule(plnr, obj, key, clusterNames, currentReplicasPerCluster, estimatedCapacity), + Status: SchedulingStatus{}, + }, nil +} + +func (a *schedulingAdapter) ScheduleObject(cluster *federationapi.Cluster, clusterObj pkgruntime.Object, federationObjCopy pkgruntime.Object, schedulingInfo *SchedulingInfo) (pkgruntime.Object, bool, error) { + replicas, ok := schedulingInfo.Schedule[cluster.Name] + if !ok { + replicas = 0 + } + + specReplicas := int32(replicas) + reflect.ValueOf(federationObjCopy).Elem().FieldByName("Spec").FieldByName("Replicas").Set(reflect.ValueOf(&specReplicas)) + + if clusterObj != nil { + schedulingStatusVal := reflect.ValueOf(schedulingInfo).Elem().FieldByName("Status") + objStatusVal := reflect.ValueOf(clusterObj).Elem().FieldByName("Status") + for i := 0; i < schedulingStatusVal.NumField(); i++ { + schedulingStatusField := schedulingStatusVal.Field(i) + schedulingStatusFieldName := schedulingStatusVal.Type().Field(i).Name + objStatusField := objStatusVal.FieldByName(schedulingStatusFieldName) + if objStatusField.IsValid() { + current := schedulingStatusField.Int() + additional := objStatusField.Int() + schedulingStatusField.SetInt(current + additional) + } + } + } + return federationObjCopy, replicas > 0, nil +} + +func (a *schedulingAdapter) UpdateFederatedStatus(obj pkgruntime.Object, status SchedulingStatus) error { + return a.updateStatusFunc(obj, status) +} + +func schedule(planner *planner.Planner, obj pkgruntime.Object, key string, clusterNames []string, currentReplicasPerCluster map[string]int64, estimatedCapacity map[string]int64) map[string]int64 { + // TODO: integrate real scheduler + replicas := 
reflect.ValueOf(obj).Elem().FieldByName("Spec").FieldByName("Replicas").Elem().Int() + scheduleResult, overflow := planner.Plan(replicas, clusterNames, currentReplicasPerCluster, estimatedCapacity, key) + + // Ensure that all current clusters end up in the scheduling result. + result := make(map[string]int64) + for clusterName := range currentReplicasPerCluster { + result[clusterName] = 0 + } + + for clusterName, replicas := range scheduleResult { + result[clusterName] = replicas + } + for clusterName, replicas := range overflow { + result[clusterName] += replicas + } + + if glog.V(4) { + buf := bytes.NewBufferString(fmt.Sprintf("Schedule - %q\n", key)) + sort.Strings(clusterNames) + for _, clusterName := range clusterNames { + cur := currentReplicasPerCluster[clusterName] + target := scheduleResult[clusterName] + fmt.Fprintf(buf, "%s: current: %d target: %d", clusterName, cur, target) + if over, found := overflow[clusterName]; found { + fmt.Fprintf(buf, " overflow: %d", over) + } + if capacity, found := estimatedCapacity[clusterName]; found { + fmt.Fprintf(buf, " capacity: %d", capacity) + } + fmt.Fprintf(buf, "\n") + } + glog.V(4).Infof(buf.String()) + } + return result +} + +// clusterReplicaState returns information about the scheduling state of the pods running in the federated clusters. 
+func clustersReplicaState( + clusterNames []string, + key string, + objectGetter func(clusterName string, key string) (interface{}, bool, error), + podsGetter func(clusterName string, obj pkgruntime.Object) (*apiv1.PodList, error)) (currentReplicasPerCluster map[string]int64, estimatedCapacity map[string]int64, err error) { + + currentReplicasPerCluster = make(map[string]int64) + estimatedCapacity = make(map[string]int64) + + for _, clusterName := range clusterNames { + obj, exists, err := objectGetter(clusterName, key) + if err != nil { + return nil, nil, err + } + if !exists { + continue + } + replicas := reflect.ValueOf(obj).Elem().FieldByName("Spec").FieldByName("Replicas").Elem().Int() + readyReplicas := reflect.ValueOf(obj).Elem().FieldByName("Status").FieldByName("ReadyReplicas").Int() + if replicas == readyReplicas { + currentReplicasPerCluster[clusterName] = readyReplicas + } else { + pods, err := podsGetter(clusterName, obj.(pkgruntime.Object)) + if err != nil { + return nil, nil, err + } + podStatus := podanalyzer.AnalyzePods(pods, time.Now()) + currentReplicasPerCluster[clusterName] = int64(podStatus.RunningAndReady) // include pending as well? 
+ unschedulable := int64(podStatus.Unschedulable) + if unschedulable > 0 { + estimatedCapacity[clusterName] = replicas - unschedulable + } + } + } + return currentReplicasPerCluster, estimatedCapacity, nil +} diff --git a/federation/pkg/federatedtypes/replicaset_test.go b/federation/pkg/federatedtypes/scheduling_test.go similarity index 90% rename from federation/pkg/federatedtypes/replicaset_test.go rename to federation/pkg/federatedtypes/scheduling_test.go index 271612039ba..d938e525a56 100644 --- a/federation/pkg/federatedtypes/replicaset_test.go +++ b/federation/pkg/federatedtypes/scheduling_test.go @@ -24,21 +24,22 @@ import ( apiv1 "k8s.io/api/core/v1" extensionsv1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkgruntime "k8s.io/apimachinery/pkg/runtime" "github.com/stretchr/testify/assert" ) func TestClusterReplicaState(t *testing.T) { - uncalledPodsGetter := func(clusterName string, replicaSet *extensionsv1.ReplicaSet) (*apiv1.PodList, error) { - t.Fatal("podsGetter should not be called when replica sets are all ready.") + uncalledPodsGetter := func(clusterName string, obj pkgruntime.Object) (*apiv1.PodList, error) { + t.Fatal("podsGetter should not be called when workload objects are all ready.") return nil, nil } - podsByReplicaSet := make(map[*extensionsv1.ReplicaSet][]*apiv1.Pod) - podsGetter := func(clusterName string, replicaSet *extensionsv1.ReplicaSet) (*apiv1.PodList, error) { - pods, ok := podsByReplicaSet[replicaSet] + podsByReplicaSet := make(map[pkgruntime.Object][]*apiv1.Pod) + podsGetter := func(clusterName string, obj pkgruntime.Object) (*apiv1.PodList, error) { + pods, ok := podsByReplicaSet[obj] if !ok { - t.Fatalf("No pods found in test data for replica set named %v", replicaSet.Name) + t.Fatalf("No pods found in test data for replica set %v", obj) return nil, fmt.Errorf("Not found") } var podListPods []apiv1.Pod @@ -64,7 +65,7 @@ func TestClusterReplicaState(t *testing.T) { rs2Replicas int32 
rs1ReadyReplicas int32 rs2ReadyReplicas int32 - podsGetter func(clusterName string, replicaSet *extensionsv1.ReplicaSet) (*apiv1.PodList, error) + podsGetter func(clusterName string, obj pkgruntime.Object) (*apiv1.PodList, error) pod1Phase apiv1.PodPhase pod1Condition apiv1.PodCondition pod2Phase apiv1.PodPhase diff --git a/federation/pkg/federatedtypes/secret.go b/federation/pkg/federatedtypes/secret.go index 937b8c65fc7..9208c96e293 100644 --- a/federation/pkg/federatedtypes/secret.go +++ b/federation/pkg/federatedtypes/secret.go @@ -21,8 +21,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" "k8s.io/kubernetes/federation/pkg/federation-controller/util" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -41,7 +41,7 @@ type SecretAdapter struct { client federationclientset.Interface } -func NewSecretAdapter(client federationclientset.Interface) FederatedTypeAdapter { +func NewSecretAdapter(client federationclientset.Interface, config *restclient.Config) FederatedTypeAdapter { return &SecretAdapter{client: client} } @@ -73,9 +73,9 @@ func (a *SecretAdapter) Equivalent(obj1, obj2 pkgruntime.Object) bool { return util.SecretEquivalent(*secret1, *secret2) } -func (a *SecretAdapter) NamespacedName(obj pkgruntime.Object) types.NamespacedName { +func (a *SecretAdapter) QualifiedName(obj pkgruntime.Object) QualifiedName { secret := obj.(*apiv1.Secret) - return types.NamespacedName{Namespace: secret.Namespace, Name: secret.Name} + return QualifiedName{Namespace: secret.Namespace, Name: secret.Name} } func (a *SecretAdapter) ObjectMeta(obj pkgruntime.Object) *metav1.ObjectMeta { @@ -87,12 +87,12 @@ func (a *SecretAdapter) FedCreate(obj pkgruntime.Object) 
(pkgruntime.Object, err return a.client.CoreV1().Secrets(secret.Namespace).Create(secret) } -func (a *SecretAdapter) FedDelete(namespacedName types.NamespacedName, options *metav1.DeleteOptions) error { - return a.client.CoreV1().Secrets(namespacedName.Namespace).Delete(namespacedName.Name, options) +func (a *SecretAdapter) FedDelete(qualifiedName QualifiedName, options *metav1.DeleteOptions) error { + return a.client.CoreV1().Secrets(qualifiedName.Namespace).Delete(qualifiedName.Name, options) } -func (a *SecretAdapter) FedGet(namespacedName types.NamespacedName) (pkgruntime.Object, error) { - return a.client.CoreV1().Secrets(namespacedName.Namespace).Get(namespacedName.Name, metav1.GetOptions{}) +func (a *SecretAdapter) FedGet(qualifiedName QualifiedName) (pkgruntime.Object, error) { + return a.client.CoreV1().Secrets(qualifiedName.Namespace).Get(qualifiedName.Name, metav1.GetOptions{}) } func (a *SecretAdapter) FedList(namespace string, options metav1.ListOptions) (pkgruntime.Object, error) { @@ -113,12 +113,12 @@ func (a *SecretAdapter) ClusterCreate(client kubeclientset.Interface, obj pkgrun return client.CoreV1().Secrets(secret.Namespace).Create(secret) } -func (a *SecretAdapter) ClusterDelete(client kubeclientset.Interface, nsName types.NamespacedName, options *metav1.DeleteOptions) error { - return client.CoreV1().Secrets(nsName.Namespace).Delete(nsName.Name, options) +func (a *SecretAdapter) ClusterDelete(client kubeclientset.Interface, qualifiedName QualifiedName, options *metav1.DeleteOptions) error { + return client.CoreV1().Secrets(qualifiedName.Namespace).Delete(qualifiedName.Name, options) } -func (a *SecretAdapter) ClusterGet(client kubeclientset.Interface, namespacedName types.NamespacedName) (pkgruntime.Object, error) { - return client.CoreV1().Secrets(namespacedName.Namespace).Get(namespacedName.Name, metav1.GetOptions{}) +func (a *SecretAdapter) ClusterGet(client kubeclientset.Interface, qualifiedName QualifiedName) (pkgruntime.Object, error) { 
+ return client.CoreV1().Secrets(qualifiedName.Namespace).Get(qualifiedName.Name, metav1.GetOptions{}) } func (a *SecretAdapter) ClusterList(client kubeclientset.Interface, namespace string, options metav1.ListOptions) (pkgruntime.Object, error) { diff --git a/federation/pkg/federation-controller/BUILD b/federation/pkg/federation-controller/BUILD index 310b2860242..28057fc52ea 100644 --- a/federation/pkg/federation-controller/BUILD +++ b/federation/pkg/federation-controller/BUILD @@ -25,9 +25,7 @@ filegroup( srcs = [ ":package-srcs", "//federation/pkg/federation-controller/cluster:all-srcs", - "//federation/pkg/federation-controller/deployment:all-srcs", "//federation/pkg/federation-controller/ingress:all-srcs", - "//federation/pkg/federation-controller/namespace:all-srcs", "//federation/pkg/federation-controller/service:all-srcs", "//federation/pkg/federation-controller/sync:all-srcs", "//federation/pkg/federation-controller/util:all-srcs", diff --git a/federation/pkg/federation-controller/deployment/BUILD b/federation/pkg/federation-controller/deployment/BUILD deleted file mode 100644 index 207236f2eab..00000000000 --- a/federation/pkg/federation-controller/deployment/BUILD +++ /dev/null @@ -1,76 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = ["deploymentcontroller.go"], - tags = ["automanaged"], - deps = [ - "//federation/apis/federation:go_default_library", - "//federation/apis/federation/v1beta1:go_default_library", - "//federation/client/clientset_generated/federation_clientset:go_default_library", - "//federation/pkg/federation-controller/util:go_default_library", - "//federation/pkg/federation-controller/util/deletionhelper:go_default_library", - "//federation/pkg/federation-controller/util/eventsink:go_default_library", - 
"//federation/pkg/federation-controller/util/planner:go_default_library", - "//federation/pkg/federation-controller/util/replicapreferences:go_default_library", - "//pkg/api:go_default_library", - "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/controller:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/k8s.io/client-go/tools/record:go_default_library", - "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - "//vendor/k8s.io/client-go/util/workqueue:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["deploymentcontroller_test.go"], - library = ":go_default_library", - tags = ["automanaged"], - deps = [ - "//federation/apis/federation/v1beta1:go_default_library", - "//federation/client/clientset_generated/federation_clientset/fake:go_default_library", - "//federation/pkg/federation-controller/util/test:go_default_library", - "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/client/clientset_generated/clientset/fake:go_default_library", - "//vendor/github.com/stretchr/testify/assert:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/federation/pkg/federation-controller/deployment/deploymentcontroller.go b/federation/pkg/federation-controller/deployment/deploymentcontroller.go deleted file mode 100644 index ccb1e13a480..00000000000 --- a/federation/pkg/federation-controller/deployment/deploymentcontroller.go +++ /dev/null @@ -1,649 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package deployment - -import ( - "bytes" - "fmt" - "sort" - "time" - - "github.com/golang/glog" - - apiv1 "k8s.io/api/core/v1" - clientv1 "k8s.io/api/core/v1" - extensionsv1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/flowcontrol" - "k8s.io/client-go/util/workqueue" - fed "k8s.io/kubernetes/federation/apis/federation" - fedv1 "k8s.io/kubernetes/federation/apis/federation/v1beta1" - fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" - fedutil "k8s.io/kubernetes/federation/pkg/federation-controller/util" - "k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper" - "k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink" - "k8s.io/kubernetes/federation/pkg/federation-controller/util/planner" - "k8s.io/kubernetes/federation/pkg/federation-controller/util/replicapreferences" - "k8s.io/kubernetes/pkg/api" - kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/pkg/controller" -) - -const ( - FedDeploymentPreferencesAnnotation = "federation.kubernetes.io/deployment-preferences" - allClustersKey = "THE_ALL_CLUSTER_KEY" - UserAgentName = "federation-deployment-controller" - ControllerName = "deployments" -) - -var ( - RequiredResources = []schema.GroupVersionResource{extensionsv1.SchemeGroupVersion.WithResource("deployments")} - deploymentReviewDelay = 10 * time.Second - clusterAvailableDelay = 20 * time.Second - clusterUnavailableDelay = 60 * time.Second - allDeploymentReviewDelay = 2 * time.Minute - updateTimeout = 30 * time.Second -) - -type DeploymentController struct { - fedClient 
fedclientset.Interface - - deploymentController cache.Controller - deploymentStore cache.Store - - fedDeploymentInformer fedutil.FederatedInformer - fedPodInformer fedutil.FederatedInformer - - deploymentDeliverer *fedutil.DelayingDeliverer - clusterDeliverer *fedutil.DelayingDeliverer - deploymentWorkQueue workqueue.Interface - // For updating members of federation. - fedUpdater fedutil.FederatedUpdater - deploymentBackoff *flowcontrol.Backoff - eventRecorder record.EventRecorder - - deletionHelper *deletionhelper.DeletionHelper - - defaultPlanner *planner.Planner -} - -// NewDeploymentController returns a new deployment controller -func NewDeploymentController(federationClient fedclientset.Interface) *DeploymentController { - broadcaster := record.NewBroadcaster() - broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(federationClient)) - recorder := broadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: UserAgentName}) - - fdc := &DeploymentController{ - fedClient: federationClient, - deploymentDeliverer: fedutil.NewDelayingDeliverer(), - clusterDeliverer: fedutil.NewDelayingDeliverer(), - deploymentWorkQueue: workqueue.New(), - deploymentBackoff: flowcontrol.NewBackOff(5*time.Second, time.Minute), - defaultPlanner: planner.NewPlanner(&fed.ReplicaAllocationPreferences{ - Clusters: map[string]fed.ClusterPreferences{ - "*": {Weight: 1}, - }, - }), - eventRecorder: recorder, - } - - deploymentFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.Controller) { - return cache.NewInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return clientset.Extensions().Deployments(metav1.NamespaceAll).List(options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return clientset.Extensions().Deployments(metav1.NamespaceAll).Watch(options) - }, - }, - &extensionsv1.Deployment{}, - controller.NoResyncPeriodFunc(), - 
fedutil.NewTriggerOnAllChanges( - func(obj runtime.Object) { fdc.deliverLocalDeployment(obj, deploymentReviewDelay) }, - ), - ) - } - clusterLifecycle := fedutil.ClusterLifecycleHandlerFuncs{ - ClusterAvailable: func(cluster *fedv1.Cluster) { - fdc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterAvailableDelay) - }, - ClusterUnavailable: func(cluster *fedv1.Cluster, _ []interface{}) { - fdc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterUnavailableDelay) - }, - } - fdc.fedDeploymentInformer = fedutil.NewFederatedInformer(federationClient, deploymentFedInformerFactory, &clusterLifecycle) - - podFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.Controller) { - return cache.NewInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return clientset.Core().Pods(metav1.NamespaceAll).List(options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return clientset.Core().Pods(metav1.NamespaceAll).Watch(options) - }, - }, - &apiv1.Pod{}, - controller.NoResyncPeriodFunc(), - fedutil.NewTriggerOnAllChanges( - func(obj runtime.Object) { - fdc.clusterDeliverer.DeliverAfter(allClustersKey, nil, allDeploymentReviewDelay) - }, - ), - ) - } - fdc.fedPodInformer = fedutil.NewFederatedInformer(federationClient, podFedInformerFactory, &fedutil.ClusterLifecycleHandlerFuncs{}) - - fdc.deploymentStore, fdc.deploymentController = cache.NewInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return fdc.fedClient.Extensions().Deployments(metav1.NamespaceAll).List(options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return fdc.fedClient.Extensions().Deployments(metav1.NamespaceAll).Watch(options) - }, - }, - &extensionsv1.Deployment{}, - controller.NoResyncPeriodFunc(), - fedutil.NewTriggerOnMetaAndSpecChanges( - func(obj runtime.Object) { 
fdc.deliverFedDeploymentObj(obj, deploymentReviewDelay) }, - ), - ) - - fdc.fedUpdater = fedutil.NewFederatedUpdater(fdc.fedDeploymentInformer, "deployment", updateTimeout, fdc.eventRecorder, - func(client kubeclientset.Interface, obj runtime.Object) error { - rs := obj.(*extensionsv1.Deployment) - _, err := client.Extensions().Deployments(rs.Namespace).Create(rs) - return err - }, - func(client kubeclientset.Interface, obj runtime.Object) error { - rs := obj.(*extensionsv1.Deployment) - _, err := client.Extensions().Deployments(rs.Namespace).Update(rs) - return err - }, - func(client kubeclientset.Interface, obj runtime.Object) error { - rs := obj.(*extensionsv1.Deployment) - orphanDependents := false - err := client.Extensions().Deployments(rs.Namespace).Delete(rs.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents}) - return err - }) - - fdc.deletionHelper = deletionhelper.NewDeletionHelper( - fdc.updateDeployment, - // objNameFunc - func(obj runtime.Object) string { - deployment := obj.(*extensionsv1.Deployment) - return fmt.Sprintf("%s/%s", deployment.Namespace, deployment.Name) - }, - fdc.fedDeploymentInformer, - fdc.fedUpdater, - ) - - return fdc -} - -// Sends the given updated object to apiserver. -// Assumes that the given object is a deployment. 
-func (fdc *DeploymentController) updateDeployment(obj runtime.Object) (runtime.Object, error) { - deployment := obj.(*extensionsv1.Deployment) - return fdc.fedClient.Extensions().Deployments(deployment.Namespace).Update(deployment) -} - -func (fdc *DeploymentController) Run(workers int, stopCh <-chan struct{}) { - go fdc.deploymentController.Run(stopCh) - fdc.fedDeploymentInformer.Start() - fdc.fedPodInformer.Start() - - fdc.deploymentDeliverer.StartWithHandler(func(item *fedutil.DelayingDelivererItem) { - fdc.deploymentWorkQueue.Add(item.Key) - }) - fdc.clusterDeliverer.StartWithHandler(func(_ *fedutil.DelayingDelivererItem) { - fdc.reconcileDeploymentsOnClusterChange() - }) - - // Wait until the cluster is synced to prevent the update storm at the very beginning. - for !fdc.isSynced() { - time.Sleep(5 * time.Millisecond) - glog.V(3).Infof("Waiting for controller to sync up") - } - - for i := 0; i < workers; i++ { - go wait.Until(fdc.worker, time.Second, stopCh) - } - - fedutil.StartBackoffGC(fdc.deploymentBackoff, stopCh) - - <-stopCh - glog.Infof("Shutting down DeploymentController") - fdc.deploymentDeliverer.Stop() - fdc.clusterDeliverer.Stop() - fdc.deploymentWorkQueue.ShutDown() - fdc.fedDeploymentInformer.Stop() - fdc.fedPodInformer.Stop() -} - -func (fdc *DeploymentController) isSynced() bool { - if !fdc.fedDeploymentInformer.ClustersSynced() { - glog.V(2).Infof("Cluster list not synced") - return false - } - clustersFromDeps, err := fdc.fedDeploymentInformer.GetReadyClusters() - if err != nil { - glog.Errorf("Failed to get ready clusters: %v", err) - return false - } - if !fdc.fedDeploymentInformer.GetTargetStore().ClustersSynced(clustersFromDeps) { - return false - } - - if !fdc.fedPodInformer.ClustersSynced() { - glog.V(2).Infof("Cluster list not synced") - return false - } - clustersFromPods, err := fdc.fedPodInformer.GetReadyClusters() - if err != nil { - glog.Errorf("Failed to get ready clusters: %v", err) - return false - } - - // This also checks 
whether podInformer and deploymentInformer have the - // same cluster lists. - if !fdc.fedPodInformer.GetTargetStore().ClustersSynced(clustersFromDeps) { - glog.V(2).Infof("Pod informer not synced") - return false - } - if !fdc.fedPodInformer.GetTargetStore().ClustersSynced(clustersFromPods) { - glog.V(2).Infof("Pod informer not synced") - return false - } - - if !fdc.deploymentController.HasSynced() { - glog.V(2).Infof("federation deployment list not synced") - return false - } - return true -} - -func (fdc *DeploymentController) deliverLocalDeployment(obj interface{}, duration time.Duration) { - key, err := controller.KeyFunc(obj) - if err != nil { - glog.Errorf("Couldn't get key for object %v: %v", obj, err) - return - } - _, exists, err := fdc.deploymentStore.GetByKey(key) - if err != nil { - glog.Errorf("Couldn't get federation deployment %v: %v", key, err) - return - } - if exists { // ignore deployments exists only in local k8s - fdc.deliverDeploymentByKey(key, duration, false) - } -} - -func (fdc *DeploymentController) deliverFedDeploymentObj(obj interface{}, delay time.Duration) { - key, err := controller.KeyFunc(obj) - if err != nil { - glog.Errorf("Couldn't get key for object %+v: %v", obj, err) - return - } - fdc.deliverDeploymentByKey(key, delay, false) -} - -func (fdc *DeploymentController) deliverDeploymentByKey(key string, delay time.Duration, failed bool) { - if failed { - fdc.deploymentBackoff.Next(key, time.Now()) - delay = delay + fdc.deploymentBackoff.Get(key) - } else { - fdc.deploymentBackoff.Reset(key) - } - fdc.deploymentDeliverer.DeliverAfter(key, nil, delay) -} - -func (fdc *DeploymentController) worker() { - for { - item, quit := fdc.deploymentWorkQueue.Get() - if quit { - return - } - key := item.(string) - status, err := fdc.reconcileDeployment(key) - fdc.deploymentWorkQueue.Done(item) - if err != nil { - glog.Errorf("Error syncing cluster controller: %v", err) - fdc.deliverDeploymentByKey(key, 0, true) - } else { - switch status { - 
case statusAllOk: - break - case statusError: - fdc.deliverDeploymentByKey(key, 0, true) - case statusNeedRecheck: - fdc.deliverDeploymentByKey(key, deploymentReviewDelay, false) - case statusNotSynced: - fdc.deliverDeploymentByKey(key, clusterAvailableDelay, false) - default: - glog.Errorf("Unhandled reconciliation status: %s", status) - fdc.deliverDeploymentByKey(key, deploymentReviewDelay, false) - } - } - } -} - -type podAnalysisResult struct { - // Total number of pods created. - total int - // Number of pods that are running and ready. - runningAndReady int - // Number of pods that have been in unschedulable state for UnshedulableThreshold seconds. - unschedulable int - - // TODO: Handle other scenarios like pod waiting too long for scheduler etc. -} - -const ( - // TODO: make it configurable - unschedulableThreshold = 60 * time.Second -) - -// A function that calculates how many pods from the list are in one of -// the meaningful (from the replica set perspective) states. This function is -// a temporary workaround against the current lack of ownerRef in pods. -// TODO(perotinus): Unify this with the ReplicaSet controller. 
-func analyzePods(selectorv1 *metav1.LabelSelector, allPods []fedutil.FederatedObject, currentTime time.Time) (map[string]podAnalysisResult, error) { - selector, err := metav1.LabelSelectorAsSelector(selectorv1) - if err != nil { - return nil, fmt.Errorf("invalid selector: %v", err) - } - result := make(map[string]podAnalysisResult) - - for _, fedObject := range allPods { - pod, isPod := fedObject.Object.(*apiv1.Pod) - if !isPod { - return nil, fmt.Errorf("invalid arg content - not a *pod") - } - if !selector.Empty() && selector.Matches(labels.Set(pod.Labels)) { - status := result[fedObject.ClusterName] - status.total++ - for _, condition := range pod.Status.Conditions { - if pod.Status.Phase == apiv1.PodRunning { - if condition.Type == apiv1.PodReady { - status.runningAndReady++ - } - } else { - if condition.Type == apiv1.PodScheduled && - condition.Status == apiv1.ConditionFalse && - condition.Reason == apiv1.PodReasonUnschedulable && - condition.LastTransitionTime.Add(unschedulableThreshold).Before(currentTime) { - - status.unschedulable++ - } - } - } - result[fedObject.ClusterName] = status - } - } - return result, nil -} - -func (fdc *DeploymentController) schedule(fd *extensionsv1.Deployment, clusters []*fedv1.Cluster, - current map[string]int64, estimatedCapacity map[string]int64) map[string]int64 { - // TODO: integrate real scheduler - - plannerToBeUsed := fdc.defaultPlanner - fdPref, err := replicapreferences.GetAllocationPreferences(fd, FedDeploymentPreferencesAnnotation) - if err != nil { - glog.Info("Invalid Deployment specific preference, use default. 
deployment: %v, err: %v", fd.Name, err) - } - if fdPref != nil { // create a new planner if user specified a preference - plannerToBeUsed = planner.NewPlanner(fdPref) - } - replicas := int64(0) - if fd.Spec.Replicas != nil { - replicas = int64(*fd.Spec.Replicas) - } - var clusterNames []string - for _, cluster := range clusters { - clusterNames = append(clusterNames, cluster.Name) - } - scheduleResult, overflow := plannerToBeUsed.Plan(replicas, clusterNames, current, estimatedCapacity, - fd.Namespace+"/"+fd.Name) - // make sure the result contains all clusters that currently have some replicas. - result := make(map[string]int64) - for clusterName := range current { - result[clusterName] = 0 - } - for clusterName, replicas := range scheduleResult { - result[clusterName] = replicas - } - for clusterName, replicas := range overflow { - result[clusterName] += replicas - } - if glog.V(4) { - buf := bytes.NewBufferString(fmt.Sprintf("Schedule - Deployment: %s/%s\n", fd.Namespace, fd.Name)) - sort.Strings(clusterNames) - for _, clusterName := range clusterNames { - cur := current[clusterName] - target := scheduleResult[clusterName] - fmt.Fprintf(buf, "%s: current: %d target: %d", clusterName, cur, target) - if over, found := overflow[clusterName]; found { - fmt.Fprintf(buf, " overflow: %d", over) - } - if capacity, found := estimatedCapacity[clusterName]; found { - fmt.Fprintf(buf, " capacity: %d", capacity) - } - fmt.Fprintf(buf, "\n") - } - glog.V(4).Infof(buf.String()) - } - return result -} - -type reconciliationStatus string - -const ( - statusAllOk = reconciliationStatus("ALL_OK") - statusNeedRecheck = reconciliationStatus("RECHECK") - statusError = reconciliationStatus("ERROR") - statusNotSynced = reconciliationStatus("NOSYNC") -) - -func (fdc *DeploymentController) reconcileDeployment(key string) (reconciliationStatus, error) { - if !fdc.isSynced() { - return statusNotSynced, nil - } - - glog.V(4).Infof("Start reconcile deployment %q", key) - startTime := 
time.Now() - defer glog.V(4).Infof("Finished reconcile deployment %q (%v)", key, time.Now().Sub(startTime)) - - objFromStore, exists, err := fdc.deploymentStore.GetByKey(key) - if err != nil { - return statusError, err - } - if !exists { - // don't delete local deployments for now. Do not reconcile it anymore. - return statusAllOk, nil - } - obj, err := api.Scheme.DeepCopy(objFromStore) - fd, ok := obj.(*extensionsv1.Deployment) - if err != nil || !ok { - glog.Errorf("Error in retrieving obj from store: %v, %v", ok, err) - return statusError, err - } - - if fd.DeletionTimestamp != nil { - if err := fdc.delete(fd); err != nil { - glog.Errorf("Failed to delete %s: %v", fd.Name, err) - fdc.eventRecorder.Eventf(fd, api.EventTypeWarning, "DeleteFailed", - "Deployment delete failed: %v", err) - return statusError, err - } - return statusAllOk, nil - } - - glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for deployment: %s", - fd.Name) - // Add the required finalizers before creating a deployment in underlying clusters. 
- updatedDeploymentObj, err := fdc.deletionHelper.EnsureFinalizers(fd) - if err != nil { - glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in deployment %s: %v", - fd.Name, err) - return statusError, err - } - fd = updatedDeploymentObj.(*extensionsv1.Deployment) - - glog.V(3).Infof("Syncing deployment %s in underlying clusters", fd.Name) - - clusters, err := fdc.fedDeploymentInformer.GetReadyClusters() - if err != nil { - return statusError, err - } - - // collect current status and do schedule - allPods, err := fdc.fedPodInformer.GetTargetStore().List() - if err != nil { - return statusError, err - } - podStatus, err := analyzePods(fd.Spec.Selector, allPods, time.Now()) - current := make(map[string]int64) - estimatedCapacity := make(map[string]int64) - for _, cluster := range clusters { - ldObj, exists, err := fdc.fedDeploymentInformer.GetTargetStore().GetByKey(cluster.Name, key) - if err != nil { - return statusError, err - } - if exists { - ld := ldObj.(*extensionsv1.Deployment) - current[cluster.Name] = int64(podStatus[cluster.Name].runningAndReady) // include pending as well? - unschedulable := int64(podStatus[cluster.Name].unschedulable) - if unschedulable > 0 { - estimatedCapacity[cluster.Name] = int64(*ld.Spec.Replicas) - unschedulable - } - } - } - - scheduleResult := fdc.schedule(fd, clusters, current, estimatedCapacity) - - glog.V(4).Infof("Start syncing local deployment %s: %v", key, scheduleResult) - - fedStatus := extensionsv1.DeploymentStatus{ObservedGeneration: fd.Generation} - operations := make([]fedutil.FederatedOperation, 0) - for clusterName, replicas := range scheduleResult { - - ldObj, exists, err := fdc.fedDeploymentInformer.GetTargetStore().GetByKey(clusterName, key) - if err != nil { - return statusError, err - } - - // The object can be modified. 
- ld := fedutil.DeepCopyDeployment(fd) - specReplicas := int32(replicas) - ld.Spec.Replicas = &specReplicas - - if !exists { - if replicas > 0 { - operations = append(operations, fedutil.FederatedOperation{ - Type: fedutil.OperationTypeAdd, - Obj: ld, - ClusterName: clusterName, - Key: key, - }) - } - } else { - // TODO: Update only one deployment at a time if update strategy is rolling update. - - currentLd := ldObj.(*extensionsv1.Deployment) - // Update existing replica set, if needed. - if !fedutil.DeploymentEquivalent(ld, currentLd) { - operations = append(operations, fedutil.FederatedOperation{ - Type: fedutil.OperationTypeUpdate, - Obj: ld, - ClusterName: clusterName, - Key: key, - }) - glog.Infof("Updating %s in %s", currentLd.Name, clusterName) - } - fedStatus.Replicas += currentLd.Status.Replicas - fedStatus.AvailableReplicas += currentLd.Status.AvailableReplicas - fedStatus.UnavailableReplicas += currentLd.Status.UnavailableReplicas - fedStatus.ReadyReplicas += currentLd.Status.ReadyReplicas - } - } - if fedStatus.Replicas != fd.Status.Replicas || - fedStatus.AvailableReplicas != fd.Status.AvailableReplicas || - fedStatus.UnavailableReplicas != fd.Status.UnavailableReplicas || - fedStatus.ReadyReplicas != fd.Status.ReadyReplicas { - fd.Status = fedStatus - _, err = fdc.fedClient.Extensions().Deployments(fd.Namespace).UpdateStatus(fd) - if err != nil { - return statusError, err - } - } - - if len(operations) == 0 { - // Everything is in order - return statusAllOk, nil - } - err = fdc.fedUpdater.Update(operations) - if err != nil { - glog.Errorf("Failed to execute updates for %s: %v", key, err) - return statusError, err - } - - // Some operations were made, reconcile after a while. 
- return statusNeedRecheck, nil -} - -func (fdc *DeploymentController) reconcileDeploymentsOnClusterChange() { - if !fdc.isSynced() { - fdc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterAvailableDelay) - } - deps := fdc.deploymentStore.List() - for _, dep := range deps { - key, _ := controller.KeyFunc(dep) - fdc.deliverDeploymentByKey(key, 0, false) - } -} - -// delete deletes the given deployment or returns error if the deletion was not complete. -func (fdc *DeploymentController) delete(deployment *extensionsv1.Deployment) error { - glog.V(3).Infof("Handling deletion of deployment: %v", *deployment) - _, err := fdc.deletionHelper.HandleObjectInUnderlyingClusters(deployment) - if err != nil { - return err - } - - err = fdc.fedClient.Extensions().Deployments(deployment.Namespace).Delete(deployment.Name, nil) - if err != nil { - // Its all good if the error is not found error. That means it is deleted already and we do not have to do anything. - // This is expected when we are processing an update as a result of deployment finalizer deletion. - // The process that deleted the last finalizer is also going to delete the deployment and we do not have to do anything. - if !errors.IsNotFound(err) { - return fmt.Errorf("failed to delete deployment: %v", err) - } - } - return nil -} diff --git a/federation/pkg/federation-controller/deployment/deploymentcontroller_test.go b/federation/pkg/federation-controller/deployment/deploymentcontroller_test.go deleted file mode 100644 index a03b05b4842..00000000000 --- a/federation/pkg/federation-controller/deployment/deploymentcontroller_test.go +++ /dev/null @@ -1,156 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package deployment - -import ( - "flag" - "fmt" - "testing" - "time" - - apiv1 "k8s.io/api/core/v1" - extensionsv1 "k8s.io/api/extensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - fedv1 "k8s.io/kubernetes/federation/apis/federation/v1beta1" - fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/fake" - . "k8s.io/kubernetes/federation/pkg/federation-controller/util/test" - kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" - - "github.com/stretchr/testify/assert" -) - -const ( - deployments = "deployments" - pods = "pods" -) - -func TestDeploymentController(t *testing.T) { - flag.Set("logtostderr", "true") - flag.Set("v", "5") - flag.Parse() - - deploymentReviewDelay = 500 * time.Millisecond - clusterAvailableDelay = 100 * time.Millisecond - clusterUnavailableDelay = 100 * time.Millisecond - allDeploymentReviewDelay = 500 * time.Millisecond - - cluster1 := NewCluster("cluster1", apiv1.ConditionTrue) - cluster2 := NewCluster("cluster2", apiv1.ConditionTrue) - - fakeClient := &fakefedclientset.Clientset{} - // Add an update reactor on fake client to return the desired updated object. - // This is a hack to workaround https://github.com/kubernetes/kubernetes/issues/40939. 
- AddFakeUpdateReactor(deployments, &fakeClient.Fake) - RegisterFakeList("clusters", &fakeClient.Fake, &fedv1.ClusterList{Items: []fedv1.Cluster{*cluster1}}) - deploymentsWatch := RegisterFakeWatch(deployments, &fakeClient.Fake) - clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake) - - cluster1Client := &fakekubeclientset.Clientset{} - cluster1Watch := RegisterFakeWatch(deployments, &cluster1Client.Fake) - _ = RegisterFakeWatch(pods, &cluster1Client.Fake) - RegisterFakeList(deployments, &cluster1Client.Fake, &extensionsv1.DeploymentList{Items: []extensionsv1.Deployment{}}) - cluster1CreateChan := RegisterFakeCopyOnCreate(deployments, &cluster1Client.Fake, cluster1Watch) - cluster1UpdateChan := RegisterFakeCopyOnUpdate(deployments, &cluster1Client.Fake, cluster1Watch) - - cluster2Client := &fakekubeclientset.Clientset{} - cluster2Watch := RegisterFakeWatch(deployments, &cluster2Client.Fake) - _ = RegisterFakeWatch(pods, &cluster2Client.Fake) - RegisterFakeList(deployments, &cluster2Client.Fake, &extensionsv1.DeploymentList{Items: []extensionsv1.Deployment{}}) - cluster2CreateChan := RegisterFakeCopyOnCreate(deployments, &cluster2Client.Fake, cluster2Watch) - - deploymentController := NewDeploymentController(fakeClient) - clientFactory := func(cluster *fedv1.Cluster) (kubeclientset.Interface, error) { - switch cluster.Name { - case cluster1.Name: - return cluster1Client, nil - case cluster2.Name: - return cluster2Client, nil - default: - return nil, fmt.Errorf("Unknown cluster") - } - } - ToFederatedInformerForTestOnly(deploymentController.fedDeploymentInformer).SetClientFactory(clientFactory) - ToFederatedInformerForTestOnly(deploymentController.fedPodInformer).SetClientFactory(clientFactory) - - stop := make(chan struct{}) - go deploymentController.Run(5, stop) - - // Create deployment. Expect to see it in cluster1. 
- dep1 := newDeploymentWithReplicas("depA", 6) - deploymentsWatch.Add(dep1) - checkDeployment := func(base *extensionsv1.Deployment, replicas int32) CheckingFunction { - return func(obj runtime.Object) error { - if obj == nil { - return fmt.Errorf("Observed object is nil") - } - d := obj.(*extensionsv1.Deployment) - if err := CompareObjectMeta(base.ObjectMeta, d.ObjectMeta); err != nil { - return err - } - if replicas != *d.Spec.Replicas { - return fmt.Errorf("Replica count is different expected:%d observed:%d", replicas, *d.Spec.Replicas) - } - return nil - } - } - assert.NoError(t, CheckObjectFromChan(cluster1CreateChan, checkDeployment(dep1, *dep1.Spec.Replicas))) - err := WaitForStoreUpdate( - deploymentController.fedDeploymentInformer.GetTargetStore(), - cluster1.Name, types.NamespacedName{Namespace: dep1.Namespace, Name: dep1.Name}.String(), wait.ForeverTestTimeout) - assert.Nil(t, err, "deployment should have appeared in the informer store") - - // Increase replica count. Expect to see the update in cluster1. - newRep := int32(8) - dep1.Spec.Replicas = &newRep - deploymentsWatch.Modify(dep1) - assert.NoError(t, CheckObjectFromChan(cluster1UpdateChan, checkDeployment(dep1, *dep1.Spec.Replicas))) - - // Add new cluster. Although rebalance = false, no pods have been created yet so it should - // rebalance anyway. - clusterWatch.Add(cluster2) - assert.NoError(t, CheckObjectFromChan(cluster1UpdateChan, checkDeployment(dep1, *dep1.Spec.Replicas/2))) - assert.NoError(t, CheckObjectFromChan(cluster2CreateChan, checkDeployment(dep1, *dep1.Spec.Replicas/2))) - - // Add new deployment with non-default replica placement preferences. 
- dep2 := newDeploymentWithReplicas("deployment2", 9) - dep2.Annotations = make(map[string]string) - dep2.Annotations[FedDeploymentPreferencesAnnotation] = `{"rebalance": true, - "clusters": { - "cluster1": {"weight": 2}, - "cluster2": {"weight": 1} - }}` - deploymentsWatch.Add(dep2) - assert.NoError(t, CheckObjectFromChan(cluster1CreateChan, checkDeployment(dep2, 6))) - assert.NoError(t, CheckObjectFromChan(cluster2CreateChan, checkDeployment(dep2, 3))) -} - -func newDeploymentWithReplicas(name string, replicas int32) *extensionsv1.Deployment { - return &extensionsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: metav1.NamespaceDefault, - SelfLink: "/api/v1/namespaces/default/deployments/name", - }, - Spec: extensionsv1.DeploymentSpec{ - Replicas: &replicas, - }, - } -} diff --git a/federation/pkg/federation-controller/ingress/ingress_controller.go b/federation/pkg/federation-controller/ingress/ingress_controller.go index 929d99452bd..34269673092 100644 --- a/federation/pkg/federation-controller/ingress/ingress_controller.go +++ b/federation/pkg/federation-controller/ingress/ingress_controller.go @@ -608,9 +608,10 @@ func (ic *IngressController) updateClusterIngressUIDToMasters(cluster *federatio clusterObj, clusterErr := api.Scheme.DeepCopy(cluster) // Make a clone so that we don't clobber our input param cluster, ok := clusterObj.(*federationapi.Cluster) if clusterErr != nil || !ok { - glog.Errorf("Internal error: Failed clone cluster resource while attempting to add master ingress UID annotation (%q = %q) from master cluster %q to cluster %q, will try again later: %v", uidAnnotationKey, masterUID, masterCluster.Name, cluster.Name, err) - return "", err + glog.Errorf("Internal error: Failed clone cluster resource while attempting to add master ingress UID annotation (%q = %q) from master cluster %q to cluster %q, will try again later: %v", uidAnnotationKey, masterUID, masterCluster.Name, cluster.Name, clusterErr) + return "", clusterErr 
} + if err == nil { if masterCluster.Name != cluster.Name { // We're not the master, need to get in sync if cluster.ObjectMeta.Annotations == nil { @@ -629,8 +630,9 @@ func (ic *IngressController) updateClusterIngressUIDToMasters(cluster *federatio return cluster.ObjectMeta.Annotations[uidAnnotationKey], nil } } else { - glog.V(2).Infof("No master cluster found to source an ingress UID from for cluster %q. Attempting to elect new master cluster %q with ingress UID %q = %q", cluster.Name, cluster.Name, uidAnnotationKey, fallbackUID) + glog.V(2).Infof("No master cluster found to source an ingress UID from for cluster %q.", cluster.Name) if fallbackUID != "" { + glog.V(2).Infof("Attempting to elect new master cluster %q with ingress UID %q = %q", cluster.Name, uidAnnotationKey, fallbackUID) if cluster.ObjectMeta.Annotations == nil { cluster.ObjectMeta.Annotations = map[string]string{} } @@ -643,7 +645,7 @@ func (ic *IngressController) updateClusterIngressUIDToMasters(cluster *federatio return fallbackUID, nil } } else { - glog.Errorf("No master cluster exists, and fallbackUID for cluster %q is invalid (%q). This probably means that no clusters have an ingress controller configmap with key %q. Federated Ingress currently supports clusters running Google Loadbalancer Controller (\"GLBC\")", cluster.Name, fallbackUID, uidKey) + glog.Errorf("No master cluster exists, and fallbackUID for cluster %q is nil. This probably means that no clusters have an ingress controller configmap with key %q. 
Federated Ingress currently supports clusters running Google Loadbalancer Controller (\"GLBC\")", cluster.Name, uidKey) return "", err } } diff --git a/federation/pkg/federation-controller/namespace/BUILD b/federation/pkg/federation-controller/namespace/BUILD deleted file mode 100644 index 1216d707201..00000000000 --- a/federation/pkg/federation-controller/namespace/BUILD +++ /dev/null @@ -1,75 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = ["namespace_controller.go"], - tags = ["automanaged"], - deps = [ - "//federation/apis/federation/v1beta1:go_default_library", - "//federation/client/clientset_generated/federation_clientset:go_default_library", - "//federation/pkg/federation-controller/util:go_default_library", - "//federation/pkg/federation-controller/util/deletionhelper:go_default_library", - "//federation/pkg/federation-controller/util/eventsink:go_default_library", - "//pkg/api:go_default_library", - "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/controller:go_default_library", - "//pkg/controller/namespace/deletion:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/dynamic:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/k8s.io/client-go/tools/record:go_default_library", - "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - ], -) - -go_test( - name = 
"go_default_test", - srcs = ["namespace_controller_test.go"], - library = ":go_default_library", - tags = ["automanaged"], - deps = [ - "//federation/apis/federation/v1beta1:go_default_library", - "//federation/client/clientset_generated/federation_clientset/fake:go_default_library", - "//federation/pkg/federation-controller/util:go_default_library", - "//federation/pkg/federation-controller/util/deletionhelper:go_default_library", - "//federation/pkg/federation-controller/util/test:go_default_library", - "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/client/clientset_generated/clientset/fake:go_default_library", - "//vendor/github.com/stretchr/testify/assert:go_default_library", - "//vendor/github.com/stretchr/testify/require:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/client-go/dynamic:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/testing:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/federation/pkg/federation-controller/namespace/namespace_controller.go b/federation/pkg/federation-controller/namespace/namespace_controller.go deleted file mode 100644 index 9979ac8c1df..00000000000 --- a/federation/pkg/federation-controller/namespace/namespace_controller.go +++ /dev/null @@ -1,460 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package namespace - -import ( - "fmt" - "time" - - apiv1 "k8s.io/api/core/v1" - clientv1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/flowcontrol" - federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1" - federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" - "k8s.io/kubernetes/federation/pkg/federation-controller/util" - "k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper" - "k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink" - "k8s.io/kubernetes/pkg/api" - kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/namespace/deletion" - - "github.com/golang/glog" -) - -const ( - allClustersKey = "ALL_CLUSTERS" - ControllerName = "namespaces" - UserAgentName = "federation-namespace-controller" -) - -var ( - RequiredResources = []schema.GroupVersionResource{apiv1.SchemeGroupVersion.WithResource("namespaces")} -) - -type NamespaceController struct { - // For triggering single namespace reconciliation. This is used when there is an - // add/update/delete operation on a namespace in either federated API server or - // in some member of the federation. 
- namespaceDeliverer *util.DelayingDeliverer - - // For triggering all namespaces reconciliation. This is used when - // a new cluster becomes available. - clusterDeliverer *util.DelayingDeliverer - - // Contains namespaces present in members of federation. - namespaceFederatedInformer util.FederatedInformer - // For updating members of federation. - federatedUpdater util.FederatedUpdater - // Definitions of namespaces that should be federated. - namespaceInformerStore cache.Store - // Informer controller for namespaces that should be federated. - namespaceInformerController cache.Controller - - // Client to federated api server. - federatedApiClient federationclientset.Interface - - // Backoff manager for namespaces - namespaceBackoff *flowcontrol.Backoff - - // For events - eventRecorder record.EventRecorder - - deletionHelper *deletionhelper.DeletionHelper - - // Helper to delete all resources in a namespace. - namespacedResourcesDeleter deletion.NamespacedResourcesDeleterInterface - - namespaceReviewDelay time.Duration - clusterAvailableDelay time.Duration - smallDelay time.Duration - updateTimeout time.Duration -} - -// NewNamespaceController returns a new namespace controller -func NewNamespaceController(client federationclientset.Interface, dynamicClientPool dynamic.ClientPool) *NamespaceController { - broadcaster := record.NewBroadcaster() - broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client)) - recorder := broadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: UserAgentName}) - - nc := &NamespaceController{ - federatedApiClient: client, - namespaceReviewDelay: time.Second * 10, - clusterAvailableDelay: time.Second * 20, - smallDelay: time.Second * 3, - updateTimeout: time.Second * 30, - namespaceBackoff: flowcontrol.NewBackOff(5*time.Second, time.Minute), - eventRecorder: recorder, - } - - // Build deliverers for triggering reconciliations. 
- nc.namespaceDeliverer = util.NewDelayingDeliverer() - nc.clusterDeliverer = util.NewDelayingDeliverer() - - // Start informer in federated API servers on namespaces that should be federated. - nc.namespaceInformerStore, nc.namespaceInformerController = cache.NewInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return client.Core().Namespaces().List(options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return client.Core().Namespaces().Watch(options) - }, - }, - &apiv1.Namespace{}, - controller.NoResyncPeriodFunc(), - util.NewTriggerOnAllChanges(func(obj runtime.Object) { nc.deliverNamespaceObj(obj, 0, false) })) - - // Federated informer on namespaces in members of federation. - nc.namespaceFederatedInformer = util.NewFederatedInformer( - client, - func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.Controller) { - return cache.NewInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return targetClient.Core().Namespaces().List(options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return targetClient.Core().Namespaces().Watch(options) - }, - }, - &apiv1.Namespace{}, - controller.NoResyncPeriodFunc(), - // Trigger reconciliation whenever something in federated cluster is changed. In most cases it - // would be just confirmation that some namespace operation succeeded. - util.NewTriggerOnMetaAndSpecChanges( - func(obj runtime.Object) { nc.deliverNamespaceObj(obj, nc.namespaceReviewDelay, false) }, - )) - }, - &util.ClusterLifecycleHandlerFuncs{ - ClusterAvailable: func(cluster *federationapi.Cluster) { - // When new cluster becomes available process all the namespaces again. - nc.clusterDeliverer.DeliverAfter(allClustersKey, nil, nc.clusterAvailableDelay) - }, - }, - ) - - // Federated updater along with Create/Update/Delete operations. 
- nc.federatedUpdater = util.NewFederatedUpdater(nc.namespaceFederatedInformer, "namespace", nc.updateTimeout, nc.eventRecorder, - func(client kubeclientset.Interface, obj runtime.Object) error { - namespace := obj.(*apiv1.Namespace) - _, err := client.Core().Namespaces().Create(namespace) - return err - }, - func(client kubeclientset.Interface, obj runtime.Object) error { - namespace := obj.(*apiv1.Namespace) - _, err := client.Core().Namespaces().Update(namespace) - return err - }, - func(client kubeclientset.Interface, obj runtime.Object) error { - namespace := obj.(*apiv1.Namespace) - orphanDependents := false - err := client.Core().Namespaces().Delete(namespace.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents}) - // IsNotFound error is fine since that means the object is deleted already. - if errors.IsNotFound(err) { - return nil - } - return err - }) - - nc.deletionHelper = deletionhelper.NewDeletionHelper( - nc.updateNamespace, - // objNameFunc - func(obj runtime.Object) string { - namespace := obj.(*apiv1.Namespace) - return fmt.Sprintf("%s/%s", namespace.Namespace, namespace.Name) - }, - nc.namespaceFederatedInformer, - nc.federatedUpdater, - ) - - discoverResourcesFn := nc.federatedApiClient.Discovery().ServerPreferredNamespacedResources - nc.namespacedResourcesDeleter = deletion.NewNamespacedResourcesDeleter( - client.Core().Namespaces(), dynamicClientPool, nil, - discoverResourcesFn, apiv1.FinalizerKubernetes, false) - return nc -} - -// Sends the given update object to apiserver. -// Assumes that the given object is a namespace. -func (nc *NamespaceController) updateNamespace(obj runtime.Object) (runtime.Object, error) { - namespace := obj.(*apiv1.Namespace) - return nc.federatedApiClient.Core().Namespaces().Update(namespace) -} - -// Returns true if the given object has the given finalizer in its NamespaceSpec. 
-func (nc *NamespaceController) hasFinalizerFuncInSpec(obj runtime.Object, finalizer apiv1.FinalizerName) bool { - namespace := obj.(*apiv1.Namespace) - for i := range namespace.Spec.Finalizers { - if namespace.Spec.Finalizers[i] == finalizer { - return true - } - } - return false -} - -// Removes the finalizer from the given objects NamespaceSpec. -func (nc *NamespaceController) removeFinalizerFromSpec(namespace *apiv1.Namespace, finalizer apiv1.FinalizerName) (*apiv1.Namespace, error) { - updatedFinalizers := []apiv1.FinalizerName{} - for i := range namespace.Spec.Finalizers { - if namespace.Spec.Finalizers[i] != finalizer { - updatedFinalizers = append(updatedFinalizers, namespace.Spec.Finalizers[i]) - } - } - namespace.Spec.Finalizers = updatedFinalizers - updatedNamespace, err := nc.federatedApiClient.Core().Namespaces().Finalize(namespace) - if err != nil { - return nil, fmt.Errorf("failed to remove finalizer %s from namespace %s: %v", string(finalizer), namespace.Name, err) - } - return updatedNamespace, nil -} - -func (nc *NamespaceController) Run(stopChan <-chan struct{}) { - go nc.namespaceInformerController.Run(stopChan) - nc.namespaceFederatedInformer.Start() - go func() { - <-stopChan - nc.namespaceFederatedInformer.Stop() - }() - nc.namespaceDeliverer.StartWithHandler(func(item *util.DelayingDelivererItem) { - namespace := item.Value.(string) - nc.reconcileNamespace(namespace) - }) - nc.clusterDeliverer.StartWithHandler(func(_ *util.DelayingDelivererItem) { - nc.reconcileNamespacesOnClusterChange() - }) - util.StartBackoffGC(nc.namespaceBackoff, stopChan) -} - -func (nc *NamespaceController) deliverNamespaceObj(obj interface{}, delay time.Duration, failed bool) { - namespace := obj.(*apiv1.Namespace) - nc.deliverNamespace(namespace.Name, delay, failed) -} - -// Adds backoff to delay if this delivery is related to some failure. Resets backoff if there was no failure. 
-func (nc *NamespaceController) deliverNamespace(namespace string, delay time.Duration, failed bool) { - if failed { - nc.namespaceBackoff.Next(namespace, time.Now()) - delay = delay + nc.namespaceBackoff.Get(namespace) - } else { - nc.namespaceBackoff.Reset(namespace) - } - nc.namespaceDeliverer.DeliverAfter(namespace, namespace, delay) -} - -// Check whether all data stores are in sync. False is returned if any of the informer/stores is not yet -// synced with the corresponding api server. -func (nc *NamespaceController) isSynced() bool { - if !nc.namespaceFederatedInformer.ClustersSynced() { - glog.V(2).Infof("Cluster list not synced") - return false - } - clusters, err := nc.namespaceFederatedInformer.GetReadyClusters() - if err != nil { - glog.Errorf("Failed to get ready clusters: %v", err) - return false - } - if !nc.namespaceFederatedInformer.GetTargetStore().ClustersSynced(clusters) { - return false - } - return true -} - -// The function triggers reconciliation of all federated namespaces. -func (nc *NamespaceController) reconcileNamespacesOnClusterChange() { - if !nc.isSynced() { - nc.clusterDeliverer.DeliverAfter(allClustersKey, nil, nc.clusterAvailableDelay) - } - for _, obj := range nc.namespaceInformerStore.List() { - namespace := obj.(*apiv1.Namespace) - nc.deliverNamespace(namespace.Name, nc.smallDelay, false) - } -} - -func (nc *NamespaceController) reconcileNamespace(namespace string) { - if !nc.isSynced() { - nc.deliverNamespace(namespace, nc.clusterAvailableDelay, false) - return - } - - namespaceObjFromStore, exist, err := nc.namespaceInformerStore.GetByKey(namespace) - if err != nil { - glog.Errorf("Failed to query main namespace store for %v: %v", namespace, err) - nc.deliverNamespace(namespace, 0, true) - return - } - - if !exist { - // Not federated namespace, ignoring. - return - } - // Create a copy before modifying the namespace to prevent race condition with - // other readers of namespace from store. 
- namespaceObj, err := api.Scheme.DeepCopy(namespaceObjFromStore) - baseNamespace, ok := namespaceObj.(*apiv1.Namespace) - if err != nil || !ok { - glog.Errorf("Error in retrieving obj from store: %v, %v", ok, err) - nc.deliverNamespace(namespace, 0, true) - return - } - if baseNamespace.DeletionTimestamp != nil { - if err := nc.delete(baseNamespace); err != nil { - glog.Errorf("Failed to delete %s: %v", namespace, err) - nc.eventRecorder.Eventf(baseNamespace, api.EventTypeWarning, "DeleteFailed", - "Namespace delete failed: %v", err) - nc.deliverNamespace(namespace, 0, true) - } - return - } - - glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for namespace: %s", - baseNamespace.Name) - // Add the required finalizers before creating a namespace in - // underlying clusters. - // This ensures that the dependent namespaces are deleted in underlying - // clusters when the federated namespace is deleted. - updatedNamespaceObj, err := nc.deletionHelper.EnsureFinalizers(baseNamespace) - if err != nil { - glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in namespace %s: %v", - baseNamespace.Name, err) - nc.deliverNamespace(namespace, 0, false) - return - } - baseNamespace = updatedNamespaceObj.(*apiv1.Namespace) - - glog.V(3).Infof("Syncing namespace %s in underlying clusters", baseNamespace.Name) - // Sync the namespace in all underlying clusters. 
- clusters, err := nc.namespaceFederatedInformer.GetReadyClusters() - if err != nil { - glog.Errorf("Failed to get cluster list: %v", err) - nc.deliverNamespace(namespace, nc.clusterAvailableDelay, false) - return - } - - operations := make([]util.FederatedOperation, 0) - for _, cluster := range clusters { - clusterNamespaceObj, found, err := nc.namespaceFederatedInformer.GetTargetStore().GetByKey(cluster.Name, namespace) - if err != nil { - glog.Errorf("Failed to get %s from %s: %v", namespace, cluster.Name, err) - nc.deliverNamespace(namespace, 0, true) - return - } - // The object should not be modified. - desiredNamespace := &apiv1.Namespace{ - ObjectMeta: util.DeepCopyRelevantObjectMeta(baseNamespace.ObjectMeta), - Spec: *(util.DeepCopyApiTypeOrPanic(&baseNamespace.Spec).(*apiv1.NamespaceSpec)), - } - glog.V(5).Infof("Desired namespace in underlying clusters: %+v", desiredNamespace) - - if !found { - operations = append(operations, util.FederatedOperation{ - Type: util.OperationTypeAdd, - Obj: desiredNamespace, - ClusterName: cluster.Name, - Key: namespace, - }) - } else { - clusterNamespace := clusterNamespaceObj.(*apiv1.Namespace) - - // Update existing namespace, if needed. - if !util.ObjectMetaAndSpecEquivalent(desiredNamespace, clusterNamespace) { - operations = append(operations, util.FederatedOperation{ - Type: util.OperationTypeUpdate, - Obj: desiredNamespace, - ClusterName: cluster.Name, - Key: namespace, - }) - } - } - } - - if len(operations) == 0 { - // Everything is in order - return - } - glog.V(2).Infof("Updating namespace %s in underlying clusters. 
Operations: %d", baseNamespace.Name, len(operations)) - - err = nc.federatedUpdater.Update(operations) - if err != nil { - glog.Errorf("Failed to execute updates for %s: %v", namespace, err) - nc.deliverNamespace(namespace, 0, true) - return - } - - // Everything is in order but lets be double sure - nc.deliverNamespace(namespace, nc.namespaceReviewDelay, false) -} - -// delete deletes the given namespace or returns error if the deletion was not complete. -func (nc *NamespaceController) delete(namespace *apiv1.Namespace) error { - // Set Terminating status. - updatedNamespace := &apiv1.Namespace{ - ObjectMeta: namespace.ObjectMeta, - Spec: namespace.Spec, - Status: apiv1.NamespaceStatus{ - Phase: apiv1.NamespaceTerminating, - }, - } - var err error - if namespace.Status.Phase != apiv1.NamespaceTerminating { - glog.V(2).Infof("Marking ns %s as terminating", namespace.Name) - nc.eventRecorder.Event(namespace, api.EventTypeNormal, "DeleteNamespace", fmt.Sprintf("Marking for deletion")) - _, err = nc.federatedApiClient.Core().Namespaces().Update(updatedNamespace) - if err != nil { - return fmt.Errorf("failed to update namespace: %v", err) - } - } - - if nc.hasFinalizerFuncInSpec(updatedNamespace, apiv1.FinalizerKubernetes) { - // Delete resources in this namespace. - err = nc.namespacedResourcesDeleter.Delete(updatedNamespace.Name) - if err != nil { - return fmt.Errorf("error in deleting resources in namespace %s: %v", namespace.Name, err) - } - glog.V(2).Infof("Removed kubernetes finalizer from ns %s", namespace.Name) - // Fetch the updated Namespace. - updatedNamespace, err = nc.federatedApiClient.Core().Namespaces().Get(updatedNamespace.Name, metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("error in fetching updated namespace %s: %s", updatedNamespace.Name, err) - } - } - - // Delete the namespace from all underlying clusters. 
- _, err = nc.deletionHelper.HandleObjectInUnderlyingClusters(updatedNamespace) - if err != nil { - return err - } - - err = nc.federatedApiClient.Core().Namespaces().Delete(namespace.Name, nil) - if err != nil { - // Its all good if the error is not found error. That means it is deleted already and we do not have to do anything. - // This is expected when we are processing an update as a result of namespace finalizer deletion. - // The process that deleted the last finalizer is also going to delete the namespace and we do not have to do anything. - if !errors.IsNotFound(err) { - return fmt.Errorf("failed to delete namespace: %v", err) - } - } - return nil -} diff --git a/federation/pkg/federation-controller/namespace/namespace_controller_test.go b/federation/pkg/federation-controller/namespace/namespace_controller_test.go deleted file mode 100644 index 461f84c65c4..00000000000 --- a/federation/pkg/federation-controller/namespace/namespace_controller_test.go +++ /dev/null @@ -1,188 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package namespace - -import ( - "fmt" - "testing" - "time" - - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/dynamic" - restclient "k8s.io/client-go/rest" - core "k8s.io/client-go/testing" - federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1" - fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/fake" - "k8s.io/kubernetes/federation/pkg/federation-controller/util" - "k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper" - . "k8s.io/kubernetes/federation/pkg/federation-controller/util/test" - kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - namespaces string = "namespaces" - clusters string = "clusters" -) - -func TestNamespaceController(t *testing.T) { - cluster1 := NewCluster("cluster1", apiv1.ConditionTrue) - cluster2 := NewCluster("cluster2", apiv1.ConditionTrue) - ns1 := apiv1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-namespace", - SelfLink: "/api/v1/namespaces/test-namespace", - }, - Spec: apiv1.NamespaceSpec{ - Finalizers: []apiv1.FinalizerName{apiv1.FinalizerKubernetes}, - }, - } - - fakeClient := &fakefedclientset.Clientset{} - RegisterFakeList(clusters, &fakeClient.Fake, &federationapi.ClusterList{Items: []federationapi.Cluster{*cluster1}}) - RegisterFakeList(namespaces, &fakeClient.Fake, &apiv1.NamespaceList{Items: []apiv1.Namespace{}}) - namespaceWatch := RegisterFakeWatch(namespaces, &fakeClient.Fake) - namespaceUpdateChan := RegisterFakeCopyOnUpdate(namespaces, &fakeClient.Fake, namespaceWatch) - clusterWatch := RegisterFakeWatch(clusters, &fakeClient.Fake) - - cluster1Client := &fakekubeclientset.Clientset{} - 
cluster1Watch := RegisterFakeWatch(namespaces, &cluster1Client.Fake) - RegisterFakeList(namespaces, &cluster1Client.Fake, &apiv1.NamespaceList{Items: []apiv1.Namespace{}}) - cluster1CreateChan := RegisterFakeCopyOnCreate(namespaces, &cluster1Client.Fake, cluster1Watch) - cluster1UpdateChan := RegisterFakeCopyOnUpdate(namespaces, &cluster1Client.Fake, cluster1Watch) - - cluster2Client := &fakekubeclientset.Clientset{} - cluster2Watch := RegisterFakeWatch(namespaces, &cluster2Client.Fake) - RegisterFakeList(namespaces, &cluster2Client.Fake, &apiv1.NamespaceList{Items: []apiv1.Namespace{}}) - cluster2CreateChan := RegisterFakeCopyOnCreate(namespaces, &cluster2Client.Fake, cluster2Watch) - - nsDeleteChan := RegisterDelete(&fakeClient.Fake, namespaces) - namespaceController := NewNamespaceController(fakeClient, dynamic.NewDynamicClientPool(&restclient.Config{})) - informerClientFactory := func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) { - switch cluster.Name { - case cluster1.Name: - return cluster1Client, nil - case cluster2.Name: - return cluster2Client, nil - default: - return nil, fmt.Errorf("Unknown cluster") - } - } - setClientFactory(namespaceController.namespaceFederatedInformer, informerClientFactory) - namespaceController.clusterAvailableDelay = time.Second - namespaceController.namespaceReviewDelay = 50 * time.Millisecond - namespaceController.smallDelay = 20 * time.Millisecond - namespaceController.updateTimeout = 5 * time.Second - - stop := make(chan struct{}) - namespaceController.Run(stop) - - // Test add federated namespace. - namespaceWatch.Add(&ns1) - // Verify that the DeleteFromUnderlyingClusters finalizer is added to the namespace. - updatedNamespace := GetNamespaceFromChan(namespaceUpdateChan) - require.NotNil(t, updatedNamespace) - AssertHasFinalizer(t, updatedNamespace, deletionhelper.FinalizerDeleteFromUnderlyingClusters) - ns1 = *updatedNamespace - - // Verify that the namespace is created in underlying cluster1. 
- createdNamespace := GetNamespaceFromChan(cluster1CreateChan) - require.NotNil(t, createdNamespace) - assert.Equal(t, ns1.Name, createdNamespace.Name) - - // Wait for the namespace to appear in the informer store - err := WaitForStoreUpdate( - namespaceController.namespaceFederatedInformer.GetTargetStore(), - cluster1.Name, ns1.Name, wait.ForeverTestTimeout) - assert.Nil(t, err, "namespace should have appeared in the informer store") - - // Test update federated namespace. - ns1.Annotations = map[string]string{ - "A": "B", - } - namespaceWatch.Modify(&ns1) - assert.NoError(t, CheckObjectFromChan(cluster1UpdateChan, MetaAndSpecCheckingFunction(&ns1))) - - // Test add cluster - clusterWatch.Add(cluster2) - createdNamespace2 := GetNamespaceFromChan(cluster2CreateChan) - require.NotNil(t, createdNamespace2) - assert.Equal(t, ns1.Name, createdNamespace2.Name) - assert.Contains(t, createdNamespace2.Annotations, "A") - - // Delete the namespace with orphan finalizer (let namespaces - // in underlying clusters be as is). - // TODO: Add a test without orphan finalizer. - ns1.ObjectMeta.Finalizers = append(ns1.ObjectMeta.Finalizers, metav1.FinalizerOrphanDependents) - ns1.DeletionTimestamp = &metav1.Time{Time: time.Now()} - namespaceWatch.Modify(&ns1) - assert.Equal(t, ns1.Name, GetStringFromChan(nsDeleteChan)) - // TODO: Add a test for verifying that resources in the namespace are deleted - // when the namespace is deleted. - // Need a fake dynamic client to mock list and delete actions to be able to test this. - // TODO: Add a fake dynamic client and test this. - // In the meantime, e2e test verify that the resources in a namespace are - // deleted when the namespace is deleted. 
- close(stop) -} - -func setClientFactory(informer util.FederatedInformer, informerClientFactory func(*federationapi.Cluster) (kubeclientset.Interface, error)) { - testInformer := ToFederatedInformerForTestOnly(informer) - testInformer.SetClientFactory(informerClientFactory) -} - -func RegisterDeleteCollection(client *core.Fake, resource string) chan string { - deleteChan := make(chan string, 100) - client.AddReactor("delete-collection", resource, func(action core.Action) (bool, runtime.Object, error) { - deleteChan <- "all" - return true, nil, nil - }) - return deleteChan -} - -func RegisterDelete(client *core.Fake, resource string) chan string { - deleteChan := make(chan string, 100) - client.AddReactor("delete", resource, func(action core.Action) (bool, runtime.Object, error) { - deleteAction := action.(core.DeleteAction) - deleteChan <- deleteAction.GetName() - return true, nil, nil - }) - return deleteChan -} - -func GetStringFromChan(c chan string) string { - select { - case str := <-c: - return str - case <-time.After(5 * time.Second): - return "timedout" - } -} - -func GetNamespaceFromChan(c chan runtime.Object) *apiv1.Namespace { - if namespace := GetObjectFromChan(c); namespace == nil { - return nil - } else { - return namespace.(*apiv1.Namespace) - } -} diff --git a/federation/pkg/federation-controller/service/servicecontroller.go b/federation/pkg/federation-controller/service/servicecontroller.go index 72c03698e83..71515ef5060 100644 --- a/federation/pkg/federation-controller/service/servicecontroller.go +++ b/federation/pkg/federation-controller/service/servicecontroller.go @@ -409,7 +409,7 @@ func (s *ServiceController) reconcileService(key string) reconciliationStatus { namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - runtime.HandleError(fmt.Errorf("Invalid key %q recieved, unable to split key to namespace and name, err: %v", key, err)) + runtime.HandleError(fmt.Errorf("Invalid key %q received, unable to split key to 
namespace and name, err: %v", key, err)) return statusNonRecoverableError } @@ -432,7 +432,7 @@ func (s *ServiceController) reconcileService(key string) reconciliationStatus { } fedService, ok := fedServiceObj.(*v1.Service) if err != nil || !ok { - runtime.HandleError(fmt.Errorf("Unknown obj recieved from store: %#v, %v", fedServiceObj, err)) + runtime.HandleError(fmt.Errorf("Unknown obj received from store: %#v, %v", fedServiceObj, err)) return statusNonRecoverableError } diff --git a/federation/pkg/federation-controller/sync/BUILD b/federation/pkg/federation-controller/sync/BUILD index 1258db9f168..3e02fc9bc72 100644 --- a/federation/pkg/federation-controller/sync/BUILD +++ b/federation/pkg/federation-controller/sync/BUILD @@ -28,7 +28,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", @@ -42,28 +41,18 @@ go_library( go_test( name = "go_default_test", - srcs = [ - "controller_test.go", - "replicasetcontroller_test.go", - ], + srcs = ["controller_test.go"], library = ":go_default_library", tags = ["automanaged"], deps = [ "//federation/apis/federation/v1beta1:go_default_library", - "//federation/client/clientset_generated/federation_clientset/fake:go_default_library", "//federation/pkg/federatedtypes:go_default_library", "//federation/pkg/federation-controller/util:go_default_library", "//federation/pkg/federation-controller/util/test:go_default_library", - "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/client/clientset_generated/clientset/fake:go_default_library", - 
"//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/testing:go_default_library", ], ) diff --git a/federation/pkg/federation-controller/sync/controller.go b/federation/pkg/federation-controller/sync/controller.go index 38eb0fc65ed..f5a8331bfb5 100644 --- a/federation/pkg/federation-controller/sync/controller.go +++ b/federation/pkg/federation-controller/sync/controller.go @@ -24,7 +24,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" pkgruntime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" @@ -97,7 +96,7 @@ type FederationSyncController struct { func StartFederationSyncController(kind string, adapterFactory federatedtypes.AdapterFactory, config *restclient.Config, stopChan <-chan struct{}, minimizeLatency bool) { restclient.AddUserAgent(config, fmt.Sprintf("federation-%s-controller", kind)) client := federationclientset.NewForConfigOrDie(config) - adapter := adapterFactory(client) + adapter := adapterFactory(client, config) controller := newFederationSyncController(client, adapter) if minimizeLatency { controller.minimizeLatency() @@ -189,9 +188,9 @@ func newFederationSyncController(client federationclientset.Interface, adapter f return err }, func(client kubeclientset.Interface, obj pkgruntime.Object) error { - namespacedName := adapter.NamespacedName(obj) + qualifiedName := adapter.QualifiedName(obj) orphanDependents := false - err := adapter.ClusterDelete(client, namespacedName, 
&metav1.DeleteOptions{OrphanDependents: &orphanDependents}) + err := adapter.ClusterDelete(client, qualifiedName, &metav1.DeleteOptions{OrphanDependents: &orphanDependents}) return err }) @@ -199,7 +198,7 @@ func newFederationSyncController(client federationclientset.Interface, adapter f s.updateObject, // objNameFunc func(obj pkgruntime.Object) string { - return adapter.NamespacedName(obj).String() + return adapter.QualifiedName(obj).String() }, s.informer, s.updater, @@ -264,38 +263,38 @@ func (s *FederationSyncController) worker() { } item := obj.(*util.DelayingDelivererItem) - namespacedName := item.Value.(*types.NamespacedName) - status := s.reconcile(*namespacedName) + qualifiedName := item.Value.(*federatedtypes.QualifiedName) + status := s.reconcile(*qualifiedName) s.workQueue.Done(item) switch status { case statusAllOK: break case statusError: - s.deliver(*namespacedName, 0, true) + s.deliver(*qualifiedName, 0, true) case statusNeedsRecheck: - s.deliver(*namespacedName, s.reviewDelay, false) + s.deliver(*qualifiedName, s.reviewDelay, false) case statusNotSynced: - s.deliver(*namespacedName, s.clusterAvailableDelay, false) + s.deliver(*qualifiedName, s.clusterAvailableDelay, false) } } } func (s *FederationSyncController) deliverObj(obj pkgruntime.Object, delay time.Duration, failed bool) { - namespacedName := s.adapter.NamespacedName(obj) - s.deliver(namespacedName, delay, failed) + qualifiedName := s.adapter.QualifiedName(obj) + s.deliver(qualifiedName, delay, failed) } // Adds backoff to delay if this delivery is related to some failure. Resets backoff if there was no failure. 
-func (s *FederationSyncController) deliver(namespacedName types.NamespacedName, delay time.Duration, failed bool) { - key := namespacedName.String() +func (s *FederationSyncController) deliver(qualifiedName federatedtypes.QualifiedName, delay time.Duration, failed bool) { + key := qualifiedName.String() if failed { s.backoff.Next(key, time.Now()) delay = delay + s.backoff.Get(key) } else { s.backoff.Reset(key) } - s.deliverer.DeliverAfter(key, &namespacedName, delay) + s.deliverer.DeliverAfter(key, &qualifiedName, delay) } // Check whether all data stores are in sync. False is returned if any of the informer/stores is not yet @@ -322,18 +321,18 @@ func (s *FederationSyncController) reconcileOnClusterChange() { s.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(s.clusterAvailableDelay)) } for _, obj := range s.store.List() { - namespacedName := s.adapter.NamespacedName(obj.(pkgruntime.Object)) - s.deliver(namespacedName, s.smallDelay, false) + qualifiedName := s.adapter.QualifiedName(obj.(pkgruntime.Object)) + s.deliver(qualifiedName, s.smallDelay, false) } } -func (s *FederationSyncController) reconcile(namespacedName types.NamespacedName) reconciliationStatus { +func (s *FederationSyncController) reconcile(qualifiedName federatedtypes.QualifiedName) reconciliationStatus { if !s.isSynced() { return statusNotSynced } kind := s.adapter.Kind() - key := namespacedName.String() + key := qualifiedName.String() glog.V(4).Infof("Starting to reconcile %v %v", kind, key) startTime := time.Now() @@ -349,10 +348,10 @@ func (s *FederationSyncController) reconcile(namespacedName types.NamespacedName meta := s.adapter.ObjectMeta(obj) if meta.DeletionTimestamp != nil { - err := s.delete(obj, kind, namespacedName) + err := s.delete(obj, kind, qualifiedName) if err != nil { msg := "Failed to delete %s %q: %v" - args := []interface{}{kind, namespacedName, err} + args := []interface{}{kind, qualifiedName, err} runtime.HandleError(fmt.Errorf(msg, args...)) 
s.eventRecorder.Eventf(obj, api.EventTypeWarning, "DeleteFailed", msg, args...) return statusError @@ -415,14 +414,25 @@ func (s *FederationSyncController) objFromCache(kind, key string) (pkgruntime.Ob } // delete deletes the given resource or returns error if the deletion was not complete. -func (s *FederationSyncController) delete(obj pkgruntime.Object, kind string, namespacedName types.NamespacedName) error { - glog.V(3).Infof("Handling deletion of %s %q", kind, namespacedName) +func (s *FederationSyncController) delete(obj pkgruntime.Object, kind string, qualifiedName federatedtypes.QualifiedName) error { + glog.V(3).Infof("Handling deletion of %s %q", kind, qualifiedName) + + // Perform pre-deletion cleanup for the namespace adapter + namespaceAdapter, ok := s.adapter.(*federatedtypes.NamespaceAdapter) + if ok { + var err error + obj, err = namespaceAdapter.CleanUpNamespace(obj, s.eventRecorder) + if err != nil { + return err + } + } + _, err := s.deletionHelper.HandleObjectInUnderlyingClusters(obj) if err != nil { return err } - err = s.adapter.FedDelete(namespacedName, nil) + err = s.adapter.FedDelete(qualifiedName, nil) if err != nil { // Its all good if the error is not found error. That means it is deleted already and we do not have to do anything. // This is expected when we are processing an update as a result of finalizer deletion. diff --git a/federation/pkg/federation-controller/sync/replicasetcontroller_test.go b/federation/pkg/federation-controller/sync/replicasetcontroller_test.go deleted file mode 100644 index fa94708677d..00000000000 --- a/federation/pkg/federation-controller/sync/replicasetcontroller_test.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sync - -import ( - "flag" - "fmt" - "testing" - "time" - - apiv1 "k8s.io/api/core/v1" - extensionsv1 "k8s.io/api/extensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" - core "k8s.io/client-go/testing" - fedv1 "k8s.io/kubernetes/federation/apis/federation/v1beta1" - fedclientfake "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/fake" - "k8s.io/kubernetes/federation/pkg/federatedtypes" - testutil "k8s.io/kubernetes/federation/pkg/federation-controller/util/test" - kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - kubeclientfake "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" - - "github.com/stretchr/testify/assert" -) - -const ( - pods = "pods" - replicasets = "replicasets" - k8s1 = "k8s-1" - k8s2 = "k8s-2" -) - -func TestReplicaSetController(t *testing.T) { - flag.Set("logtostderr", "true") - flag.Set("v", "5") - flag.Parse() - - fedclientset := fedclientfake.NewSimpleClientset() - fedrswatch := watch.NewFake() - fedclientset.PrependWatchReactor(replicasets, core.DefaultWatchReactor(fedrswatch, nil)) - - fedclientset.Federation().Clusters().Create(testutil.NewCluster(k8s1, apiv1.ConditionTrue)) - fedclientset.Federation().Clusters().Create(testutil.NewCluster(k8s2, apiv1.ConditionTrue)) - - kube1clientset := kubeclientfake.NewSimpleClientset() - kube1rswatch := watch.NewFake() - kube1clientset.PrependWatchReactor(replicasets, core.DefaultWatchReactor(kube1rswatch, nil)) - kube1Podwatch := watch.NewFake() - 
kube1clientset.PrependWatchReactor(pods, core.DefaultWatchReactor(kube1Podwatch, nil)) - kube2clientset := kubeclientfake.NewSimpleClientset() - kube2rswatch := watch.NewFake() - kube2clientset.PrependWatchReactor(replicasets, core.DefaultWatchReactor(kube2rswatch, nil)) - kube2Podwatch := watch.NewFake() - kube2clientset.PrependWatchReactor(pods, core.DefaultWatchReactor(kube2Podwatch, nil)) - - fedInformerClientFactory := func(cluster *fedv1.Cluster) (kubeclientset.Interface, error) { - switch cluster.Name { - case k8s1: - return kube1clientset, nil - case k8s2: - return kube2clientset, nil - default: - return nil, fmt.Errorf("Unknown cluster: %v", cluster.Name) - } - } - replicaSetController := newFederationSyncController(fedclientset, federatedtypes.NewReplicaSetAdapter(fedclientset)) - replicaSetController.minimizeLatency() - rsFedinformer := testutil.ToFederatedInformerForTestOnly(replicaSetController.informer) - rsFedinformer.SetClientFactory(fedInformerClientFactory) - - stopChan := make(chan struct{}) - defer close(stopChan) - go replicaSetController.Run(stopChan) - - rs := newReplicaSetWithReplicas("rs", 9) - rs, _ = fedclientset.Extensions().ReplicaSets(metav1.NamespaceDefault).Create(rs) - fedrswatch.Add(rs) - time.Sleep(2 * time.Second) - - rs1, _ := kube1clientset.Extensions().ReplicaSets(metav1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) - kube1rswatch.Add(rs1) - rs1.Status.Replicas = *rs1.Spec.Replicas - rs1.Status.FullyLabeledReplicas = *rs1.Spec.Replicas - rs1.Status.ReadyReplicas = *rs1.Spec.Replicas - rs1.Status.AvailableReplicas = *rs1.Spec.Replicas - rs1, _ = kube1clientset.Extensions().ReplicaSets(metav1.NamespaceDefault).UpdateStatus(rs1) - kube1rswatch.Modify(rs1) - - rs2, _ := kube2clientset.Extensions().ReplicaSets(metav1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) - kube2rswatch.Add(rs2) - rs2.Status.Replicas = *rs2.Spec.Replicas - rs2.Status.FullyLabeledReplicas = *rs2.Spec.Replicas - rs2.Status.ReadyReplicas = 
*rs2.Spec.Replicas - rs2.Status.AvailableReplicas = *rs2.Spec.Replicas - rs2, _ = kube2clientset.Extensions().ReplicaSets(metav1.NamespaceDefault).UpdateStatus(rs2) - kube2rswatch.Modify(rs2) - - time.Sleep(2 * time.Second) - rs, _ = fedclientset.Extensions().ReplicaSets(metav1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) - assert.Equal(t, *rs.Spec.Replicas, *rs1.Spec.Replicas+*rs2.Spec.Replicas) - assert.Equal(t, rs.Status.Replicas, rs1.Status.Replicas+rs2.Status.Replicas) - assert.Equal(t, rs.Status.FullyLabeledReplicas, rs1.Status.FullyLabeledReplicas+rs2.Status.FullyLabeledReplicas) - assert.Equal(t, rs.Status.ReadyReplicas, rs1.Status.ReadyReplicas+rs2.Status.ReadyReplicas) - assert.Equal(t, rs.Status.AvailableReplicas, rs1.Status.AvailableReplicas+rs2.Status.AvailableReplicas) - - var replicas int32 = 20 - rs.Spec.Replicas = &replicas - rs, _ = fedclientset.Extensions().ReplicaSets(metav1.NamespaceDefault).Update(rs) - fedrswatch.Modify(rs) - time.Sleep(2 * time.Second) - - rs1, _ = kube1clientset.Extensions().ReplicaSets(metav1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) - rs1.Status.Replicas = *rs1.Spec.Replicas - rs1.Status.FullyLabeledReplicas = *rs1.Spec.Replicas - rs1.Status.ReadyReplicas = *rs1.Spec.Replicas - rs1.Status.AvailableReplicas = *rs1.Spec.Replicas - rs1, _ = kube1clientset.Extensions().ReplicaSets(metav1.NamespaceDefault).UpdateStatus(rs1) - kube1rswatch.Modify(rs1) - - rs2, _ = kube2clientset.Extensions().ReplicaSets(metav1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) - rs2.Status.Replicas = *rs2.Spec.Replicas - rs2.Status.FullyLabeledReplicas = *rs2.Spec.Replicas - rs2.Status.ReadyReplicas = *rs2.Spec.Replicas - rs2.Status.AvailableReplicas = *rs2.Spec.Replicas - rs2, _ = kube2clientset.Extensions().ReplicaSets(metav1.NamespaceDefault).UpdateStatus(rs2) - kube2rswatch.Modify(rs2) - - time.Sleep(1 * time.Second) - rs, _ = fedclientset.Extensions().ReplicaSets(metav1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) 
- assert.Equal(t, *rs.Spec.Replicas, *rs1.Spec.Replicas+*rs2.Spec.Replicas) - assert.Equal(t, rs.Status.Replicas, rs1.Status.Replicas+rs2.Status.Replicas) - assert.Equal(t, rs.Status.FullyLabeledReplicas, rs1.Status.FullyLabeledReplicas+rs2.Status.FullyLabeledReplicas) - assert.Equal(t, rs.Status.ReadyReplicas, rs1.Status.ReadyReplicas+rs2.Status.ReadyReplicas) - assert.Equal(t, rs.Status.AvailableReplicas, rs1.Status.AvailableReplicas+rs2.Status.AvailableReplicas) -} - -func newReplicaSetWithReplicas(name string, replicas int32) *extensionsv1.ReplicaSet { - return &extensionsv1.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: metav1.NamespaceDefault, - SelfLink: "/api/v1/namespaces/default/replicasets/name", - }, - Spec: extensionsv1.ReplicaSetSpec{ - Replicas: &replicas, - }, - } -} diff --git a/federation/pkg/kubefed/init/init_test.go b/federation/pkg/kubefed/init/init_test.go index 55071cb1008..7fc902a1cf5 100644 --- a/federation/pkg/kubefed/init/init_test.go +++ b/federation/pkg/kubefed/init/init_test.go @@ -1444,7 +1444,11 @@ func tlsHandshake(t *testing.T, sCfg, cCfg *tls.Config) error { } }() - c, err := tls.Dial("tcp", s.Addr().String(), cCfg) + // workaround [::] not working in ipv4 only systems (https://github.com/golang/go/issues/18806) + // TODO: remove with Golang 1.9 with https://go-review.googlesource.com/c/45088/ + addr := strings.TrimPrefix(s.Addr().String(), "[::]") + + c, err := tls.Dial("tcp", addr, cCfg) if err != nil { // Intentionally not serializing the error received because we want to // test for the failure case in the caller test function. 
diff --git a/federation/pkg/kubefed/join.go b/federation/pkg/kubefed/join.go index 0962d455c2f..c004fb32a9f 100644 --- a/federation/pkg/kubefed/join.go +++ b/federation/pkg/kubefed/join.go @@ -209,7 +209,7 @@ func (j *joinFederation) Run(f cmdutil.Factory, cmdOut io.Writer, config util.Ad glog.V(2).Info("Creating federation system namespace in joining cluster") _, err = createFederationSystemNamespace(joiningClusterClientset, federationNamespace, federationName, joiningClusterName, dryRun) if err != nil { - glog.V(2).Info("Error creating federation system namespace in joining cluster: %v", err) + glog.V(2).Infof("Error creating federation system namespace in joining cluster: %v", err) return err } glog.V(2).Info("Created federation system namespace in joining cluster") @@ -218,7 +218,7 @@ func (j *joinFederation) Run(f cmdutil.Factory, cmdOut io.Writer, config util.Ad po.LoadingRules.ExplicitPath = kubeconfig clientConfig, err := po.GetStartingConfig() if err != nil { - glog.V(2).Info("Could not load clientConfig from %s: %v", kubeconfig, err) + glog.V(2).Infof("Could not load clientConfig from %s: %v", kubeconfig, err) return err } @@ -662,7 +662,7 @@ func populateSecretInHostCluster(clusterClientset, hostClientset internalclients return nil, err } - glog.V(2).Info("Getting secret named: %s", sa.Secrets[0].Name) + glog.V(2).Infof("Getting secret named: %s", sa.Secrets[0].Name) var secret *api.Secret err = wait.PollImmediate(1*time.Second, serviceAccountSecretTimeout, func() (bool, error) { var err error diff --git a/hack/.linted_packages b/hack/.linted_packages index 8b59a161dea..ac486f7762a 100644 --- a/hack/.linted_packages +++ b/hack/.linted_packages @@ -14,6 +14,7 @@ cmd/kube-apiserver cmd/kube-apiserver/app cmd/kube-apiserver/app/options cmd/kube-apiserver/app/preflight +cmd/kube-apiserver/app/testing cmd/kube-controller-manager cmd/kube-controller-manager/app/options cmd/kube-proxy @@ -22,7 +23,9 @@ cmd/kubeadm/app/apis/kubeadm/install 
cmd/kubeadm/app/discovery/https cmd/kubeadm/app/phases/apiconfig cmd/kubeadm/app/phases/certs +cmd/kubeadm/app/phases/controlplane cmd/kubeadm/app/phases/kubeconfig +cmd/kubeadm/app/phases/selfhosting cmd/kubectl cmd/kubelet cmd/libs/go2idl/client-gen @@ -67,6 +70,7 @@ pkg/api/service pkg/api/v1 pkg/api/v1/node pkg/api/v1/service +pkg/apimachinery/tests pkg/apis/abac/v0 pkg/apis/abac/v1beta1 pkg/apis/admission/install @@ -201,10 +205,12 @@ pkg/controller/volume/attachdetach/util pkg/conversion pkg/conversion/queryparams pkg/credentialprovider/aws +pkg/credentialprovider/azure pkg/fieldpath pkg/fields pkg/hyperkube pkg/kubectl/cmd/util/openapi +pkg/kubectl/util/term pkg/kubelet/apis/cri pkg/kubelet/apis/stats/v1alpha1 pkg/kubelet/container @@ -238,8 +244,6 @@ pkg/registry/core/service/ipallocator/controller pkg/registry/core/service/ipallocator/storage pkg/registry/core/serviceaccount pkg/registry/extensions/podsecuritypolicy/storage -pkg/registry/extensions/thirdpartyresource -pkg/registry/extensions/thirdpartyresource/storage pkg/registry/rbac/clusterrole/storage pkg/registry/rbac/clusterrolebinding/storage pkg/registry/rbac/role/storage @@ -258,32 +262,21 @@ pkg/security/podsecuritypolicy/sysctl pkg/serviceaccount pkg/types pkg/util/async -pkg/util/errors pkg/util/flock -pkg/util/framer pkg/util/goroutinemap pkg/util/hash pkg/util/i18n pkg/util/interrupt -pkg/util/intstr pkg/util/io -pkg/util/json pkg/util/limitwriter pkg/util/logs pkg/util/maps pkg/util/metrics -pkg/util/net +pkg/util/net/sets pkg/util/netsh -pkg/util/rand -pkg/util/runtime -pkg/util/sets -pkg/util/sets/types +pkg/util/slice pkg/util/tail -pkg/util/validation -pkg/util/validation/field pkg/util/version -pkg/util/wait -pkg/util/yaml pkg/version/prometheus pkg/volume pkg/volume/downwardapi @@ -339,6 +332,7 @@ staging/src/k8s.io/apimachinery/pkg/api/equality staging/src/k8s.io/apimachinery/pkg/api/errors staging/src/k8s.io/apimachinery/pkg/api/resource 
staging/src/k8s.io/apimachinery/pkg/apimachinery +staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/install staging/src/k8s.io/apimachinery/pkg/conversion/queryparams staging/src/k8s.io/apimachinery/pkg/fields staging/src/k8s.io/apimachinery/pkg/labels @@ -385,7 +379,6 @@ staging/src/k8s.io/client-go/discovery staging/src/k8s.io/client-go/examples/create-update-delete-deployment staging/src/k8s.io/client-go/examples/in-cluster-client-configuration staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration -staging/src/k8s.io/client-go/examples/third-party-resources-deprecated staging/src/k8s.io/client-go/informers staging/src/k8s.io/client-go/informers/admissionregistration staging/src/k8s.io/client-go/informers/admissionregistration/v1alpha1 @@ -444,6 +437,7 @@ staging/src/k8s.io/client-go/rest/watch staging/src/k8s.io/client-go/tools/auth staging/src/k8s.io/client-go/tools/metrics staging/src/k8s.io/client-go/tools/remotecommand +staging/src/k8s.io/client-go/transport/spdy staging/src/k8s.io/client-go/util/cert staging/src/k8s.io/client-go/util/homedir staging/src/k8s.io/client-go/util/workqueue @@ -462,6 +456,14 @@ staging/src/k8s.io/metrics/pkg/apis/custom_metrics/install staging/src/k8s.io/metrics/pkg/apis/metrics/install staging/src/k8s.io/sample-apiserver staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/install +staging/src/k8s.io/sample-apiserver/pkg/client/informers_generated/externalversions +staging/src/k8s.io/sample-apiserver/pkg/client/informers_generated/externalversions/wardle +staging/src/k8s.io/sample-apiserver/pkg/client/informers_generated/externalversions/wardle/v1alpha1 +staging/src/k8s.io/sample-apiserver/pkg/client/informers_generated/internalversion +staging/src/k8s.io/sample-apiserver/pkg/client/informers_generated/internalversion/wardle +staging/src/k8s.io/sample-apiserver/pkg/client/informers_generated/internalversion/wardle/internalversion 
+staging/src/k8s.io/sample-apiserver/pkg/client/listers_generated/wardle/internalversion +staging/src/k8s.io/sample-apiserver/pkg/client/listers_generated/wardle/v1alpha1 test/e2e/framework/ginkgowrapper test/e2e/manifest test/e2e/perftype @@ -472,14 +474,15 @@ test/images/entrypoint-tester test/images/fakegitserver test/images/goproxy test/images/logs-generator -test/images/mount-tester +test/images/mounttest test/images/n-way-http test/images/net test/images/net/common test/images/port-forward-tester test/images/porter test/images/resource-consumer/consume-cpu -test/images/serve_hostname +test/images/serve-hostname +test/images/test-webserver test/integration/apiserver test/integration/client test/integration/configmap @@ -498,7 +501,6 @@ test/integration/quota test/integration/secrets test/integration/serviceaccount test/integration/storageclasses -test/integration/thirdparty test/integration/ttlcontroller test/soak/cauldron test/soak/serve_hostnames diff --git a/hack/OWNERS b/hack/OWNERS index 74582d5f9c9..4ece8cb16e2 100644 --- a/hack/OWNERS +++ b/hack/OWNERS @@ -7,6 +7,7 @@ reviewers: - lavalamp - spxtr - zmerlynn + - sttts approvers: - deads2k - eparis @@ -20,3 +21,4 @@ approvers: - shashidharatd - spxtr - zmerlynn + - sttts diff --git a/hack/e2e.go b/hack/e2e.go index 889754a6681..bc58a0ad8cf 100644 --- a/hack/e2e.go +++ b/hack/e2e.go @@ -144,8 +144,9 @@ func (t tester) getKubetest(get bool, old time.Duration) (string, error) { return "", fmt.Errorf("Cannot install kubetest until $GOPATH is set") } log.Print("Updating kubetest binary...") - if err = t.wait("go", "get", "-u", "k8s.io/test-infra/kubetest"); err != nil { - return "", err // Could not upgrade + cmd := []string{"go", "get", "-u", "k8s.io/test-infra/kubetest"} + if err = t.wait(cmd[0], cmd[1:]...); err != nil { + return "", fmt.Errorf("%s: %v", strings.Join(cmd, " "), err) // Could not upgrade } if p, err = t.lookKubetest(); err != nil { return "", err // Cannot find kubetest diff --git 
a/hack/e2e_test.go b/hack/e2e_test.go index b7cfce552c4..77aca4ed52f 100644 --- a/hack/e2e_test.go +++ b/hack/e2e_test.go @@ -190,10 +190,12 @@ func TestGetKubetest(t *testing.T) { p := "PATH" pk := filepath.Join(p, "kubetest") eu := errors.New("upgrade failed") + euVerbose := fmt.Errorf("go get -u k8s.io/test-infra/kubetest: %v", eu) et := errors.New("touch failed") cases := []struct { - get bool - old time.Duration + name string + get bool + old time.Duration stat string // stat succeeds on this file path bool // file exists on path @@ -205,7 +207,7 @@ func TestGetKubetest(t *testing.T) { returnPath string returnError error }{ - { // 0: Pass when on GOPATH/bin + {name: "0: Pass when on GOPATH/bin", get: false, old: 0, @@ -219,7 +221,7 @@ func TestGetKubetest(t *testing.T) { returnPath: gpk, returnError: nil, }, - { // 1: Pass when on PATH + {name: "1: Pass when on PATH", get: false, old: 0, @@ -233,7 +235,7 @@ func TestGetKubetest(t *testing.T) { returnPath: pk, returnError: nil, }, - { // 2: Don't upgrade if on PATH and GOPATH is "" + {name: "2: Don't upgrade if on PATH and GOPATH is ''", get: true, old: 0, @@ -247,7 +249,7 @@ func TestGetKubetest(t *testing.T) { returnPath: pk, returnError: nil, }, - { // 3: Don't upgrade on PATH when young. + {name: "3: Don't upgrade on PATH when young.", get: true, old: time.Hour, @@ -261,7 +263,7 @@ func TestGetKubetest(t *testing.T) { returnPath: pk, returnError: nil, }, - { // 4: Upgrade if old but GOPATH is set. 
+ {name: "4: Upgrade if old but GOPATH is set.", get: true, old: 0, @@ -275,7 +277,7 @@ func TestGetKubetest(t *testing.T) { returnPath: pk, returnError: nil, }, - { // 5: Fail if upgrade fails + {name: "5: Fail if upgrade fails", get: true, old: 0, @@ -287,9 +289,9 @@ func TestGetKubetest(t *testing.T) { goPath: gpk, returnPath: "", - returnError: eu, + returnError: euVerbose, }, - { // 6: Fail if touch fails + {name: "6: Fail if touch fails", get: true, old: 0, @@ -309,21 +311,21 @@ func TestGetKubetest(t *testing.T) { didUp := false didTouch := false l := tester{ - func(p string) (os.FileInfo, error) { + stat: func(p string) (os.FileInfo, error) { // stat if p != c.stat { return nil, fmt.Errorf("Failed to find %s", p) } return FileInfo{time.Now().Add(c.age * -1)}, nil }, - func(name string) (string, error) { + lookPath: func(name string) (string, error) { if c.path { return filepath.Join(p, name), nil } return "", fmt.Errorf("Not on path: %s", name) }, - c.goPath, - func(cmd string, args ...string) error { + goPath: c.goPath, + wait: func(cmd string, args ...string) error { if cmd == "go" { if c.upgraded { didUp = true @@ -338,14 +340,24 @@ func TestGetKubetest(t *testing.T) { return et }, } - if p, e := l.getKubetest(c.get, c.old); p != c.returnPath || e != c.returnError { - t.Errorf("%d: c=%v p=%v e=%v", i, c, p, e) + p, e := l.getKubetest(c.get, c.old) + if p != c.returnPath { + t.Errorf("%d: test=%q returnPath %q != %q", i, c.name, p, c.returnPath) + } + if e == nil || c.returnError == nil { + if e != c.returnError { + t.Errorf("%d: test=%q returnError %q != %q", i, c.name, e, c.returnError) + } + } else { + if e.Error() != c.returnError.Error() { + t.Errorf("%d: test=%q returnError %q != %q", i, c.name, e, c.returnError) + } } if didUp != c.upgraded { - t.Errorf("%d: bad upgrade state of %v", i, didUp) + t.Errorf("%d: test=%q bad upgrade state of %v", i, c.name, didUp) } if didTouch != c.touched { - t.Errorf("%d: bad touch state of %v", i, didTouch) + 
t.Errorf("%d: test=%q bad touch state of %v", i, c.name, didTouch) } } } diff --git a/hack/ginkgo-e2e.sh b/hack/ginkgo-e2e.sh index bb52e3702bb..d892786debc 100755 --- a/hack/ginkgo-e2e.sh +++ b/hack/ginkgo-e2e.sh @@ -157,6 +157,7 @@ export PATH=$(dirname "${e2e_test}"):"${PATH}" ${MASTER_OS_DISTRIBUTION:+"--master-os-distro=${MASTER_OS_DISTRIBUTION}"} \ ${NODE_OS_DISTRIBUTION:+"--node-os-distro=${NODE_OS_DISTRIBUTION}"} \ ${NUM_NODES:+"--num-nodes=${NUM_NODES}"} \ + ${CLUSTER_IP_RANGE:+"--cluster-ip-range=${CLUSTER_IP_RANGE}"} \ ${E2E_CLEAN_START:+"--clean-start=true"} \ ${E2E_MIN_STARTUP_PODS:+"--minStartupPods=${E2E_MIN_STARTUP_PODS}"} \ ${E2E_REPORT_DIR:+"--report-dir=${E2E_REPORT_DIR}"} \ diff --git a/hack/godep-save.sh b/hack/godep-save.sh index 5749eb9facc..ef2988ef572 100755 --- a/hack/godep-save.sh +++ b/hack/godep-save.sh @@ -22,8 +22,14 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. source "${KUBE_ROOT}/hack/lib/init.sh" source "${KUBE_ROOT}/hack/lib/util.sh" +kube::util::ensure_single_dir_gopath kube::util::ensure_godep_version v79 +if [ -e "${KUBE_ROOT}/vendor" -o -e "${KUBE_ROOT}/Godeps" ]; then + echo "The directory vendor/ or Godeps/ exists. Remove them before running godep-save.sh" 1>&2 + exit 1 +fi + # Some things we want in godeps aren't code dependencies, so ./... # won't pick them up. REQUIRED_BINS=( @@ -34,34 +40,34 @@ REQUIRED_BINS=( ) pushd "${KUBE_ROOT}" > /dev/null + # sanity check that staging directories do not exist in GOPATH + error=0 + for repo in $(ls ${KUBE_ROOT}/staging/src/k8s.io); do + if [ -e "${GOPATH}/src/k8s.io/${repo}" ]; then + echo "k8s.io/${repo} exists in GOPATH. Remove before running godep-save.sh." 1>&2 + error=1 + fi + done + if [ "${error}" = "1" ]; then + exit 1 + fi + + echo "Running godep save. This will take around 15 minutes." GOPATH=${GOPATH}:${KUBE_ROOT}/staging godep save "${REQUIRED_BINS[@]}" # create a symlink in vendor directory pointing to the staging client. 
This # let other packages use the staging client as if it were vendored. - if [ ! -e "vendor/k8s.io/api" ]; then - ln -s ../../staging/src/k8s.io/api vendor/k8s.io/api - fi - if [ ! -e "vendor/k8s.io/client-go" ]; then - ln -s ../../staging/src/k8s.io/client-go vendor/k8s.io/client-go - fi - if [ ! -e "vendor/k8s.io/apiserver" ]; then - ln -s ../../staging/src/k8s.io/apiserver vendor/k8s.io/apiserver - fi - if [ ! -e "vendor/k8s.io/apimachinery" ]; then - ln -s ../../staging/src/k8s.io/apimachinery vendor/k8s.io/apimachinery - fi - if [ ! -e "vendor/k8s.io/kube-aggregator" ]; then - ln -s ../../staging/src/k8s.io/kube-aggregator vendor/k8s.io/kube-aggregator - fi - if [ ! -e "vendor/k8s.io/apiextensions-apiserver" ]; then - ln -s ../../staging/src/k8s.io/apiextensions-apiserver vendor/k8s.io/apiextensions-apiserver - fi - if [ ! -e "vendor/k8s.io/sample-apiserver" ]; then - ln -s ../../staging/src/k8s.io/sample-apiserver vendor/k8s.io/sample-apiserver - fi - if [ ! -e "vendor/k8s.io/metrics" ]; then - ln -s ../../staging/src/k8s.io/metrics vendor/k8s.io/metrics - fi + for repo in $(ls ${KUBE_ROOT}/staging/src/k8s.io); do + if [ ! -e "vendor/k8s.io/${repo}" ]; then + ln -s "../../staging/src/k8s.io/${repo}" "vendor/k8s.io/${repo}" + fi + done popd > /dev/null -echo "Don't forget to run hack/update-godep-licenses.sh if you added or removed a dependency!" +# Workaround broken symlink in docker repo because godep copies the link, but not the target +rm -rf ${KUBE_ROOT}/vendor/github.com/docker/docker/project/ + +echo +echo "Don't forget to run:" +echo "- hack/update-bazel.sh to recreate the BUILD files" +echo "- hack/update-godep-licenses.sh if you added or removed a dependency!" 
diff --git a/hack/lib/util.sh b/hack/lib/util.sh index c32f47c77fc..745a8982cfa 100755 --- a/hack/lib/util.sh +++ b/hack/lib/util.sh @@ -205,6 +205,7 @@ kube::util::gen-docs() { mkdir -p "${dest}/docs/admin/" "${genkubedocs}" "${dest}/docs/admin/" "kube-apiserver" "${genkubedocs}" "${dest}/docs/admin/" "kube-controller-manager" + "${genkubedocs}" "${dest}/docs/admin/" "cloud-controller-manager" "${genkubedocs}" "${dest}/docs/admin/" "kube-proxy" "${genkubedocs}" "${dest}/docs/admin/" "kube-scheduler" "${genkubedocs}" "${dest}/docs/admin/" "kubelet" @@ -218,6 +219,7 @@ kube::util::gen-docs() { mkdir -p "${dest}/docs/man/man1/" "${genman}" "${dest}/docs/man/man1/" "kube-apiserver" "${genman}" "${dest}/docs/man/man1/" "kube-controller-manager" + "${genman}" "${dest}/docs/man/man1/" "cloud-controller-manager" "${genman}" "${dest}/docs/man/man1/" "kube-proxy" "${genman}" "${dest}/docs/man/man1/" "kube-scheduler" "${genman}" "${dest}/docs/man/man1/" "kubelet" @@ -511,6 +513,14 @@ kube::util::ensure_godep_version() { godep version } +# Checks that the GOPATH is simple, i.e. consists only of one directory, not multiple. +kube::util::ensure_single_dir_gopath() { + if [[ "${GOPATH}" == *:* ]]; then + echo "GOPATH must consist of a single directory." 1>&2 + exit 1 + fi +} + # Checks whether there are any files matching pattern $2 changed between the # current branch and upstream branch named by $1. 
# Returns 1 (false) if there are no changes, 0 (true) if there are changes diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index 928d60563b4..ca4a74516dc 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -98,6 +98,9 @@ ADMISSION_CONTROL_CONFIG_FILE=${ADMISSION_CONTROL_CONFIG_FILE:-""} # START_MODE can be 'all', 'kubeletonly', or 'nokubelet' START_MODE=${START_MODE:-"all"} +# A list of controllers to enable +KUBE_CONTROLLERS="${KUBE_CONTROLLERS:-"*"}" + # sanity check for OpenStack provider if [ "${CLOUD_PROVIDER}" == "openstack" ]; then if [ "${CLOUD_CONFIG}" == "" ]; then @@ -574,6 +577,7 @@ function start_controller_manager { CTLRMGR_LOG=${LOG_DIR}/kube-controller-manager.log ${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" controller-manager \ --v=${LOG_LEVEL} \ + --vmodule="${LOG_SPEC}" \ --service-account-private-key-file="${SERVICE_ACCOUNT_KEY}" \ --root-ca-file="${ROOT_CA_FILE}" \ --cluster-signing-cert-file="${CLUSTER_SIGNING_CERT_FILE}" \ @@ -586,6 +590,7 @@ function start_controller_manager { --cloud-config="${CLOUD_CONFIG}" \ --kubeconfig "$CERT_DIR"/controller.kubeconfig \ --use-service-account-credentials \ + --controllers="${KUBE_CONTROLLERS}" \ --master="https://${API_HOST}:${API_SECURE_PORT}" >"${CTLRMGR_LOG}" 2>&1 & CTLRMGR_PID=$! 
} @@ -649,6 +654,7 @@ function start_kubelet { sudo -E "${GO_OUT}/hyperkube" kubelet ${priv_arg}\ --v=${LOG_LEVEL} \ + --vmodule="${LOG_SPEC}" \ --chaos-chance="${CHAOS_CHANCE}" \ --container-runtime="${CONTAINER_RUNTIME}" \ --rkt-path="${RKT_PATH}" \ diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh index fc34fe9e6c0..5a507a46868 100644 --- a/hack/make-rules/test-cmd-util.sh +++ b/hack/make-rules/test-cmd-util.sh @@ -797,7 +797,7 @@ __EOF__ chmod +x /tmp/tmp-editor.sh # Pre-condition: valid-pod POD has image nginx kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:' - EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod + [[ "$(EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod --output-patch=true | grep Patch:)" ]] # Post-condition: valid-pod POD has image gcr.io/google_containers/serve_hostname kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/serve_hostname:' # cleaning @@ -1492,161 +1492,6 @@ __EOF__ set +o errexit } -run_tpr_tests() { - set -o nounset - set -o errexit - - create_and_use_new_namespace - kube::log::status "Testing kubectl tpr" - kubectl "${kube_flags[@]}" create -f - "${kube_flags[@]}" << __EOF__ -{ - "kind": "ThirdPartyResource", - "apiVersion": "extensions/v1beta1", - "metadata": { - "name": "foo.company.com" - }, - "versions": [ - { - "name": "v1" - } - ] -} -__EOF__ - - # Post-Condition: assertion object exist - kube::test::get_object_assert thirdpartyresources "{{range.items}}{{$id_field}}:{{end}}" 'foo.company.com:' - - kubectl "${kube_flags[@]}" create -f - "${kube_flags[@]}" << __EOF__ -{ - "kind": "ThirdPartyResource", - "apiVersion": "extensions/v1beta1", - "metadata": { - "name": "bar.company.com" - }, - "versions": [ - { - "name": "v1" - } - ] -} -__EOF__ - - # Post-Condition: assertion object exist - kube::test::get_object_assert thirdpartyresources 
"{{range.items}}{{$id_field}}:{{end}}" 'bar.company.com:foo.company.com:' - - run_non_native_resource_tests - - # teardown - kubectl delete thirdpartyresources/foo.company.com "${kube_flags[@]}" - kubectl delete thirdpartyresources/bar.company.com "${kube_flags[@]}" - - set +o nounset - set +o errexit -} - -run_tpr_migration_tests() { - set -o nounset - set -o errexit - - kube::log::status "Testing kubectl tpr migration" - local i tries - create_and_use_new_namespace - - # Create CRD first. This is sort of backwards so we can create a marker below. - kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__ -{ - "kind": "CustomResourceDefinition", - "apiVersion": "apiextensions.k8s.io/v1beta1", - "metadata": { - "name": "foos.company.crd" - }, - "spec": { - "group": "company.crd", - "version": "v1", - "names": { - "plural": "foos", - "kind": "Foo" - } - } -} -__EOF__ - # Wait for API to become available. - tries=0 - until kubectl "${kube_flags[@]}" get foos.company.crd || [ $tries -gt 10 ]; do - tries=$((tries+1)) - sleep ${tries} - done - kube::test::get_object_assert foos.company.crd '{{len .items}}' '0' - - # Create a marker that only exists in CRD so we know when CRD is active vs. TPR. - kubectl "${kube_flags[@]}" create -f - << __EOF__ -{ - "kind": "Foo", - "apiVersion": "company.crd/v1", - "metadata": { - "name": "crd-marker" - }, - "testValue": "only exists in CRD" -} -__EOF__ - kube::test::get_object_assert foos.company.crd '{{len .items}}' '1' - - # Now create a TPR that sits in front of the CRD and hides it. - kubectl "${kube_flags[@]}" create -f - << __EOF__ -{ - "kind": "ThirdPartyResource", - "apiVersion": "extensions/v1beta1", - "metadata": { - "name": "foo.company.crd" - }, - "versions": [ - { - "name": "v1" - } - ] -} -__EOF__ - # The marker should disappear. - kube::test::wait_object_assert foos.company.crd '{{len .items}}' '0' - - # Add some items to the TPR. 
- for i in {1..10}; do - kubectl "${kube_flags[@]}" create -f - << __EOF__ -{ - "kind": "Foo", - "apiVersion": "company.crd/v1", - "metadata": { - "name": "tpr-${i}" - }, - "testValue": "migrate-${i}" -} -__EOF__ - done - kube::test::get_object_assert foos.company.crd '{{len .items}}' '10' - - # Delete the TPR and wait for the CRD to take over. - kubectl "${kube_flags[@]}" delete thirdpartyresource/foo.company.crd - tries=0 - until kubectl "${kube_flags[@]}" get foos.company.crd/crd-marker || [ $tries -gt 10 ]; do - tries=$((tries+1)) - sleep ${tries} - done - kube::test::get_object_assert foos.company.crd/crd-marker '{{.testValue}}' 'only exists in CRD' - - # Check if the TPR items were migrated to CRD. - kube::test::get_object_assert foos.company.crd '{{len .items}}' '11' - for i in {1..10}; do - kube::test::get_object_assert foos.company.crd/tpr-${i} '{{.testValue}}' "migrate-${i}" - done - - # teardown - kubectl delete customresourcedefinitions/foos.company.crd "${kube_flags_with_token[@]}" - - set +o nounset - set +o errexit -} - - kube::util::non_native_resources() { local times local wait @@ -2435,6 +2280,9 @@ run_service_tests() { # prove role=master kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:" + # Set selector of a local file without talking to the server + kubectl set selector -f examples/guestbook/redis-master-service.yaml role=padawan --local -o yaml "${kube_flags[@]}" + ! kubectl set selector -f examples/guestbook/redis-master-service.yaml role=padawan --dry-run -o yaml "${kube_flags[@]}" # Set command to change the selector. 
kubectl set selector -f examples/guestbook/redis-master-service.yaml role=padawan # prove role=padawan @@ -2443,6 +2291,10 @@ run_service_tests() { kubectl set selector -f examples/guestbook/redis-master-service.yaml app=redis,role=master,tier=backend # prove role=master kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:" + # Show dry-run works on running selector + kubectl set selector services redis-master role=padawan --dry-run -o yaml "${kube_flags[@]}" + ! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}" + kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:" ### Dump current redis-master service output_service=$(kubectl get service redis-master -o json --output-version=v1 "${kube_flags[@]}") @@ -4455,13 +4307,6 @@ runTests() { record_command run_crd_tests fi - if kube::test::if_supports_resource "${thirdpartyresources}" ; then - record_command run_tpr_tests - if kube::test::if_supports_resource "${customresourcedefinitions}" ; then - record_command run_tpr_migration_tests - fi - fi - ################# # Run cmd w img # ################# diff --git a/hack/make-rules/test-e2e-node.sh b/hack/make-rules/test-e2e-node.sh index 54a8233c7e9..8c1e2e1158e 100755 --- a/hack/make-rules/test-e2e-node.sh +++ b/hack/make-rules/test-e2e-node.sh @@ -33,6 +33,7 @@ container_runtime_endpoint=${CONTAINER_RUNTIME_ENDPOINT:-""} image_service_endpoint=${IMAGE_SERVICE_ENDPOINT:-""} run_until_failure=${RUN_UNTIL_FAILURE:-"false"} test_args=${TEST_ARGS:-""} +system_spec_name=${SYSTEM_SPEC_NAME:-} # Parse the flags to pass to ginkgo ginkgoflags="" @@ -135,7 +136,7 @@ if [ $remote = true ] ; then --results-dir="$artifacts" --ginkgo-flags="$ginkgoflags" \ --image-project="$image_project" --instance-name-prefix="$instance_prefix" \ --delete-instances="$delete_instances" --test_args="$test_args" 
--instance-metadata="$metadata" \ - --image-config-file="$image_config_file" \ + --image-config-file="$image_config_file" --system-spec-name="$system_spec_name" \ 2>&1 | tee -i "${artifacts}/build-log.txt" exit $? @@ -163,7 +164,8 @@ else # Test using the host the script was run on # Provided for backwards compatibility - go run test/e2e_node/runner/local/run_local.go --ginkgo-flags="$ginkgoflags" \ + go run test/e2e_node/runner/local/run_local.go \ + --system-spec-name="$system_spec_name" --ginkgo-flags="$ginkgoflags" \ --test-flags="--container-runtime=${runtime} \ --container-runtime-endpoint=${container_runtime_endpoint} \ --image-service-endpoint=${image_service_endpoint} \ diff --git a/hack/make-rules/verify.sh b/hack/make-rules/verify.sh index 1560985a8d4..d1710bdc4c9 100755 --- a/hack/make-rules/verify.sh +++ b/hack/make-rules/verify.sh @@ -29,7 +29,25 @@ EXCLUDED_PATTERNS=( "verify-*-dockerized.sh" # Don't run any scripts that intended to be run dockerized ) +# Only run whitelisted fast checks in quick mode. +# These run in <10s each on enisoc's workstation, assuming that +# `make` and `hack/godep-restore.sh` had already been run. 
+QUICK_PATTERNS+=( + "verify-api-groups.sh" + "verify-bazel.sh" + "verify-boilerplate.sh" + "verify-godep-licenses.sh" + "verify-gofmt.sh" + "verify-pkg-names.sh" + "verify-readonly-packages.sh" + "verify-staging-client-go.sh" + "verify-staging-imports.sh" + "verify-test-images.sh" + "verify-test-owners.sh" +) + EXCLUDED_CHECKS=$(ls ${EXCLUDED_PATTERNS[@]/#/${KUBE_ROOT}\/hack\/} 2>/dev/null || true) +QUICK_CHECKS=$(ls ${QUICK_PATTERNS[@]/#/${KUBE_ROOT}\/hack\/} 2>/dev/null || true) function is-excluded { for e in ${EXCLUDED_CHECKS[@]}; do @@ -40,6 +58,15 @@ function is-excluded { return 1 } +function is-quick { + for e in ${QUICK_CHECKS[@]}; do + if [[ $1 -ef "$e" ]]; then + return + fi + done + return 1 +} + function run-cmd { if ${SILENT}; then "$@" &> /dev/null @@ -58,6 +85,10 @@ function run-checks { echo "Skipping ${t}" continue fi + if ${QUICK} && ! is-quick "${t}" ; then + echo "Skipping ${t} in quick mode" + continue + fi echo -e "Verifying ${t}" local start=$(date +%s) run-cmd "${runner}" "${t}" && tr=$? || tr=$? @@ -71,11 +102,17 @@ function run-checks { done } -while getopts ":v" opt; do +SILENT=true +QUICK=false + +while getopts ":vQ" opt; do case ${opt} in v) SILENT=false ;; + Q) + QUICK=true + ;; \?) echo "Invalid flag: -${OPTARG}" >&2 exit 1 @@ -87,6 +124,10 @@ if ${SILENT} ; then echo "Running in silent mode, run with -v if you want to see script logs." fi +if ${QUICK} ; then + echo "Running in quick mode (-Q flag). Only fast checks will run." 
+fi + ret=0 run-checks "${KUBE_ROOT}/hack/verify-*.sh" bash run-checks "${KUBE_ROOT}/hack/verify-*.py" python diff --git a/hack/update_owners.py b/hack/update_owners.py index e6c3d06d04b..2faefeb7ca6 100755 --- a/hack/update_owners.py +++ b/hack/update_owners.py @@ -111,7 +111,7 @@ def get_maintainers(): "nikhiljindal", "piosz", "pmorie", "pwittrock", "Q-Lee", "quinton-hoole", "Random-Liu", "rmmh", "roberthbailey", "saad-ali", "smarterclayton", "soltysh", "spxtr", "sttts", "thelinuxfoundation", "thockin", - "timothysc", "timstclair", "vishh", "wojtek-t", "xiang90", "yifan-gu", + "timothysc", "tallclair", "vishh", "wojtek-t", "xiang90", "yifan-gu", "yujuhong", "zmerlynn"} return sorted(ret - SKIP_MAINTAINERS) diff --git a/hack/verify-flags/exceptions.txt b/hack/verify-flags/exceptions.txt index 633f76759e7..76fc26d3244 100644 --- a/hack/verify-flags/exceptions.txt +++ b/hack/verify-flags/exceptions.txt @@ -53,6 +53,7 @@ cluster/lib/logging.sh: local source_file=${BASH_SOURCE[$stack_skip]} cluster/log-dump.sh: local -r node_name="${1}" cluster/log-dump.sh: for node_name in "${node_names[@]}"; do cluster/log-dump.sh:readonly report_dir="${1:-_artifacts}" +cluster/log-dump/log-dump.sh:readonly report_dir="${1:-_artifacts}" cluster/photon-controller/templates/salt-master.sh: api_servers: $MASTER_NAME cluster/photon-controller/templates/salt-minion.sh: hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}') cluster/photon-controller/util.sh: node_ip=$(${PHOTON} vm networks "${node_id}" | grep -i $'\t'"00:0C:29" | grep -E '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -1 | awk -F'\t' '{print $3}') diff --git a/hack/verify-flags/excluded-flags.txt b/hack/verify-flags/excluded-flags.txt index fc1e86d0e8d..7b6cd4e4cce 100644 --- a/hack/verify-flags/excluded-flags.txt +++ b/hack/verify-flags/excluded-flags.txt @@ -24,3 +24,4 @@ valid_flag retry_time file_content_in_loop break_on_expected_content +Premium_LRS diff --git a/hack/verify-flags/known-flags.txt 
b/hack/verify-flags/known-flags.txt index 991aa53aa75..33d02fdd602 100644 --- a/hack/verify-flags/known-flags.txt +++ b/hack/verify-flags/known-flags.txt @@ -46,6 +46,7 @@ audit-log-maxage audit-log-maxbackup audit-log-maxsize audit-log-path +audit-policy-file audit-webhook-config-file audit-webhook-mode authentication-kubeconfig @@ -105,6 +106,7 @@ cluster-context cluster-dns cluster-domain cluster-ip +cluster-ip-range cluster-monitor-period cluster-name cluster-signing-cert-file @@ -185,6 +187,7 @@ docker-exec-handler docker-password docker-server docker-username +dockershim-checkpoint-dir driver-port drop-embedded-fields dry-run @@ -297,6 +300,7 @@ gather-logs-sizes gather-metrics-at-teardown gather-resource-usage gather-suite-metrics-at-teardown +gce-api-endpoint gce-multizone gce-project gce-service-account @@ -331,6 +335,7 @@ http-port ignore-daemonsets ignore-not-found image-config-file +image-description image-gc-high-threshold image-gc-low-threshold image-project @@ -467,19 +472,7 @@ max-outgoing-qps max-pods max-requests-inflight max-unavailable -mesos-authentication-principal -mesos-authentication-provider -mesos-authentication-secret-file -mesos-cgroup-prefix -mesos-default-pod-roles -mesos-executor-cpus -mesos-executor-mem -mesos-framework-roles -mesos-generate-task-discovery -mesos-launch-grace-period -mesos-master -mesos-sandbox-overlay -mesos-user +metrics-bind-address metrics-path min-available minimum-container-ttl-duration @@ -533,6 +526,7 @@ output-base output-directory output-file-base output-package +output-patch output-print-type output-version out-version @@ -680,6 +674,8 @@ system-cgroups system-pods-startup-timeout system-reserved system-reserved-cgroup +system-spec-file +system-spec-name system-validate-mode target-port target-ram-mb diff --git a/hack/verify-generated-protobuf.sh b/hack/verify-generated-protobuf.sh index de0b4dbbb96..c05ecf4e595 100755 --- a/hack/verify-generated-protobuf.sh +++ b/hack/verify-generated-protobuf.sh @@ 
-23,7 +23,7 @@ source "${KUBE_ROOT}/hack/lib/init.sh" kube::golang::setup_env -APIROOTS=${APIROOTS:-pkg/api pkg/apis pkg/runtime pkg/util/intstr pkg/watch staging/src/k8s.io/apimachinery/pkg/api staging/src/k8s.io/apimachinery/pkg/apis staging/src/k8s.io/apiserver/pkg staging/src/k8s.io/api staging/src/k8s.io/metrics/pkg/apis} +APIROOTS=${APIROOTS:-pkg/api pkg/apis pkg/runtime pkg/watch staging/src/k8s.io/apimachinery/pkg/api staging/src/k8s.io/apimachinery/pkg/apis staging/src/k8s.io/apiserver/pkg staging/src/k8s.io/api staging/src/k8s.io/metrics/pkg/apis} _tmp="${KUBE_ROOT}/_tmp" cleanup() { diff --git a/hack/verify-godeps.sh b/hack/verify-godeps.sh index 4f8c60739fc..d56c0597629 100755 --- a/hack/verify-godeps.sh +++ b/hack/verify-godeps.sh @@ -48,7 +48,8 @@ source "${KUBE_ROOT}/hack/lib/init.sh" readonly branch=${1:-${KUBE_VERIFY_GIT_BRANCH:-master}} if ! [[ ${KUBE_FORCE_VERIFY_CHECKS:-} =~ ^[yY]$ ]] && \ ! kube::util::has_changes_against_upstream_branch "${branch}" 'Godeps/' && \ - ! kube::util::has_changes_against_upstream_branch "${branch}" 'vendor/'; then + ! kube::util::has_changes_against_upstream_branch "${branch}" 'vendor/' && \ + ! kube::util::has_changes_against_upstream_branch "${branch}" 'hack/'; then exit 0 fi @@ -85,13 +86,8 @@ _kubetmp="${_kubetmp}/kubernetes" export GOPATH="${_tmpdir}" pushd "${_kubetmp}" 2>&1 > /dev/null - kube::util::ensure_godep_version v79 - - export GOPATH="${GOPATH}:${_kubetmp}/staging" - # Fill out that nice clean place with the kube godeps - echo "Starting to download all kubernetes godeps. 
This takes a while" - godep restore - echo "Download finished" + # Restore the Godeps into our temp directory + hack/godep-restore.sh # Destroy deps in the copy of the kube tree rm -rf ./Godeps ./vendor diff --git a/hack/verify-golint.sh b/hack/verify-golint.sh index b8c575b94ab..76baaf9b222 100755 --- a/hack/verify-golint.sh +++ b/hack/verify-golint.sh @@ -23,6 +23,12 @@ source "${KUBE_ROOT}/hack/lib/init.sh" kube::golang::verify_go_version +if ! which golint > /dev/null; then + echo 'Can not find golint, install with:' + echo 'go get -u github.com/golang/lint/golint' + exit 1 +fi + cd "${KUBE_ROOT}" array_contains () { diff --git a/hack/verify-staging-imports.sh b/hack/verify-staging-imports.sh index 8ef5a6a18b7..102b6fc1fa5 100755 --- a/hack/verify-staging-imports.sh +++ b/hack/verify-staging-imports.sh @@ -65,7 +65,7 @@ function print_forbidden_imports () { } RC=0 -print_forbidden_imports apimachinery || RC=1 +print_forbidden_imports apimachinery should_be_leaf || RC=1 print_forbidden_imports api k8s.io/apimachinery || RC=1 print_forbidden_imports client-go k8s.io/apimachinery k8s.io/api || RC=1 print_forbidden_imports apiserver k8s.io/apimachinery k8s.io/client-go k8s.io/api || RC=1 @@ -81,7 +81,7 @@ if grep -rq '// import "k8s.io/kubernetes/' 'staging/'; then exit 1 fi -for EXAMPLE in vendor/k8s.io/client-go/examples/{in-cluster-client-configuration,out-of-cluster-client-configuration,third-party-resources-deprecated} vendor/k8s.io/apiextensions-apiserver/examples ; do +for EXAMPLE in vendor/k8s.io/client-go/examples/{in-cluster-client-configuration,out-of-cluster-client-configuration} vendor/k8s.io/apiextensions-apiserver/examples ; do test -d "${EXAMPLE}" # make sure example is still there if go list -f '{{ join .Deps "\n" }}' "./${EXAMPLE}/..." | sort | uniq | grep -q k8s.io/client-go/plugin; then echo "${EXAMPLE} imports client-go plugins by default, but shouldn't." 
diff --git a/labels.yaml b/labels.yaml index 2cd2e6eb373..45c33b9bdaf 100644 --- a/labels.yaml +++ b/labels.yaml @@ -4,6 +4,8 @@ repo: kubernetes/kubernetes labels: - name: approved color: 0ffa16 +- name: approved-for-milestone + color: fef2c0 - name: area/admin color: 0052cc - name: area/admission-control @@ -48,12 +50,16 @@ labels: color: 0052cc - name: area/HA color: 0052cc +- name: area/hw-accelerators + color: 0052cc - name: area/images-registry color: 0052cc - name: area/ingress color: 0052cc - name: area/introspection color: 0052cc +- name: area/ipv6 + color: 0052cc - name: area/isolation color: 0052cc - name: area/kube-proxy @@ -92,6 +98,8 @@ labels: color: d4c5f9 - name: area/platform/gke color: d4c5f9 +- name: area/platform/mesos + color: d4c5f9 - name: area/platform/vagrant color: d4c5f9 - name: area/platform/vsphere @@ -180,6 +188,8 @@ labels: color: f7c6c7 - name: kind/friction color: c7def8 +- name: kind/mesos-flake + color: f7c6c7 - name: kind/new-api color: c7def8 - name: kind/old-docs @@ -200,6 +210,8 @@ labels: color: b60205 - name: needs-rebase color: BDBDBD +- name: needs-sig + color: ededed - name: non-release-blocker color: 0e8a16 - name: ok-to-merge @@ -252,12 +264,16 @@ labels: color: d2b48c - name: sig/apps color: d2b48c +- name: sig/architecture + color: d2b48c - name: sig/auth color: d2b48c - name: sig/autoscaling color: d2b48c - name: sig/aws color: d2b48c +- name: sig/azure + color: d2b48c - name: sig/big-data color: d2b48c - name: sig/cli @@ -282,6 +298,8 @@ labels: color: d2b48c - name: sig/openstack color: d2b48c +- name: sig/release + color: d2b48c - name: sig/rktnetes color: d2b48c - name: sig/scalability @@ -294,6 +312,8 @@ labels: color: d2b48c - name: sig/testing color: d2b48c +- name: sig/ui + color: d2b48c - name: sig/windows color: d2b48c - name: size/L @@ -310,6 +330,10 @@ labels: color: ee0000 - name: stale color: "795548" +- name: status/in-progress + color: fef2c0 +- name: status/in-review + color: fef2c0 - name: team/api 
(deprecated - do not use) color: ededed - name: team/cluster (deprecated - do not use) @@ -320,6 +344,8 @@ labels: color: d2b48c - name: team/huawei color: d2b48c +- name: team/mesosphere + color: d2b48c - name: team/redhat color: d2b48c - name: team/test-infra diff --git a/pkg/BUILD b/pkg/BUILD index b7edeac6d32..59a8fd75733 100644 --- a/pkg/BUILD +++ b/pkg/BUILD @@ -44,7 +44,7 @@ filegroup( "//pkg/client/conditions:all-srcs", "//pkg/client/informers/informers_generated/externalversions:all-srcs", "//pkg/client/informers/informers_generated/internalversion:all-srcs", - "//pkg/client/leaderelection:all-srcs", + "//pkg/client/leaderelectionconfig:all-srcs", "//pkg/client/listers/admissionregistration/internalversion:all-srcs", "//pkg/client/listers/admissionregistration/v1alpha1:all-srcs", "//pkg/client/listers/apps/internalversion:all-srcs", @@ -101,7 +101,6 @@ filegroup( "//pkg/kubemark:all-srcs", "//pkg/labels:all-srcs", "//pkg/master:all-srcs", - "//pkg/metrics:all-srcs", "//pkg/printers:all-srcs", "//pkg/probe:all-srcs", "//pkg/proxy:all-srcs", diff --git a/pkg/api/OWNERS b/pkg/api/OWNERS index 3a9b0c6d159..6d85ec75f9b 100644 --- a/pkg/api/OWNERS +++ b/pkg/api/OWNERS @@ -17,7 +17,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - davidopp @@ -33,7 +32,7 @@ reviewers: - pwittrock - roberthbailey - ncdc -- timstclair +- tallclair - yifan-gu - eparis - mwielgus diff --git a/pkg/api/ref/ref_test.go b/pkg/api/ref/ref_test.go index 10a99abad7b..18042bf2408 100644 --- a/pkg/api/ref/ref_test.go +++ b/pkg/api/ref/ref_test.go @@ -35,8 +35,6 @@ type ExtensionAPIObject struct { metav1.ObjectMeta } -func (obj *ExtensionAPIObject) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } - func TestGetReference(t *testing.T) { // when vendoring kube, if you don't force the set of registered versions (like make test does) diff --git a/pkg/api/testapi/OWNERS b/pkg/api/testapi/OWNERS index 1703ec89061..ede98b35226 100755 --- 
a/pkg/api/testapi/OWNERS +++ b/pkg/api/testapi/OWNERS @@ -8,9 +8,8 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - erictune -- timstclair +- tallclair - eparis - soltysh - madhusudancs diff --git a/pkg/api/testapi/testapi.go b/pkg/api/testapi/testapi.go index 3a08f575bf9..ebb42a4cb18 100644 --- a/pkg/api/testapi/testapi.go +++ b/pkg/api/testapi/testapi.go @@ -432,7 +432,7 @@ func (g TestGroup) SelfLink(resource, name string) string { } } -// Returns the appropriate path for the given prefix (watch, proxy, redirect, etc), resource, namespace and name. +// ResourcePathWithPrefix returns the appropriate path for the given prefix (watch, proxy, redirect, etc), resource, namespace and name. // For ex, this is of the form: // /api/v1/watch/namespaces/foo/pods/pod0 for v1. func (g TestGroup) ResourcePathWithPrefix(prefix, resource, namespace, name string) string { @@ -462,7 +462,7 @@ func (g TestGroup) ResourcePathWithPrefix(prefix, resource, namespace, name stri return path } -// Returns the appropriate path for the given resource, namespace and name. +// ResourcePath returns the appropriate path for the given resource, namespace and name. // For example, this is of the form: // /api/v1/namespaces/foo/pods/pod0 for v1. func (g TestGroup) ResourcePath(resource, namespace, name string) string { @@ -480,6 +480,7 @@ func (g TestGroup) SubResourcePath(resource, namespace, name, sub string) string return path } +// RESTMapper returns RESTMapper in api.Registry. 
func (g TestGroup) RESTMapper() meta.RESTMapper { return api.Registry.RESTMapper() } @@ -494,7 +495,7 @@ func ExternalGroupVersions() schema.GroupVersions { return versions } -// Get codec based on runtime.Object +// GetCodecForObject gets codec based on runtime.Object func GetCodecForObject(obj runtime.Object) (runtime.Codec, error) { kinds, _, err := api.Scheme.ObjectKinds(obj) if err != nil { @@ -522,6 +523,7 @@ func GetCodecForObject(obj runtime.Object) (runtime.Codec, error) { return nil, fmt.Errorf("unexpected kind: %v", kind) } +// NewTestGroup creates a new TestGroup. func NewTestGroup(external, internal schema.GroupVersion, internalTypes map[string]reflect.Type, externalTypes map[string]reflect.Type) TestGroup { return TestGroup{external, internal, internalTypes, externalTypes} } diff --git a/pkg/api/testing/OWNERS b/pkg/api/testing/OWNERS index 0f61880702b..2ee0a449ca9 100755 --- a/pkg/api/testing/OWNERS +++ b/pkg/api/testing/OWNERS @@ -11,7 +11,6 @@ reviewers: - vishh - mikedanese - nikhiljindal -- bprashanth - erictune - pmorie - dchen1107 @@ -20,7 +19,7 @@ reviewers: - justinsb - pwittrock - roberthbailey -- timstclair +- tallclair - yifan-gu - eparis - soltysh diff --git a/pkg/api/types.go b/pkg/api/types.go index a5811a97050..e7aea27e8e1 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -1008,7 +1008,7 @@ type DownwardAPIVolumeSource struct { type DownwardAPIVolumeFile struct { // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' Path string - // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + // Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported. 
// +optional FieldRef *ObjectFieldSelector // Selects a resource of the container: only resources limits and requests @@ -1373,7 +1373,7 @@ type EnvVar struct { // Only one of its fields may be set. type EnvVarSource struct { // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. + // metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. // +optional FieldRef *ObjectFieldSelector // Selects a resource of the container: only resources limits and requests @@ -3852,7 +3852,7 @@ const ( // Enable TTY for remote command execution ExecTTYParam = "tty" // Command to run for remote command execution - ExecCommandParamm = "command" + ExecCommandParam = "command" // Name of header that specifies stream type StreamType = "streamType" diff --git a/pkg/api/unstructured_test.go b/pkg/api/unstructured_test.go index 72de2e329d9..1351ea861ec 100644 --- a/pkg/api/unstructured_test.go +++ b/pkg/api/unstructured_test.go @@ -97,8 +97,7 @@ func doRoundTrip(t *testing.T, group testapi.TestGroup, kind string) { return } - newUnstr := make(map[string]interface{}) - err = unstructured.DefaultConverter.ToUnstructured(item, &newUnstr) + newUnstr, err := unstructured.DefaultConverter.ToUnstructured(item) if err != nil { t.Errorf("ToUnstructured failed: %v", err) return @@ -138,8 +137,8 @@ func BenchmarkToFromUnstructured(b *testing.B) { size := len(items) b.ResetTimer() for i := 0; i < b.N; i++ { - unstr := map[string]interface{}{} - if err := unstructured.DefaultConverter.ToUnstructured(&items[i%size], &unstr); err != nil { + unstr, err := unstructured.DefaultConverter.ToUnstructured(&items[i%size]) + if err != nil { b.Fatalf("unexpected error: %v", err) } obj := v1.Pod{} diff --git a/pkg/api/v1/OWNERS b/pkg/api/v1/OWNERS index fdb84b24a92..66568527f93 100755 --- a/pkg/api/v1/OWNERS +++ b/pkg/api/v1/OWNERS @@ -12,7 +12,6 @@ 
reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - davidopp @@ -27,7 +26,7 @@ reviewers: - justinsb - roberthbailey - ncdc -- timstclair +- tallclair - eparis - timothysc - piosz diff --git a/pkg/api/v1/conversion.go b/pkg/api/v1/conversion.go index 2b07586d7b5..585e7c285fe 100644 --- a/pkg/api/v1/conversion.go +++ b/pkg/api/v1/conversion.go @@ -188,6 +188,7 @@ func addConversionFuncs(scheme *runtime.Scheme) error { "metadata.labels", "metadata.name", "metadata.namespace", + "metadata.uid", "spec.nodeName", "spec.restartPolicy", "spec.serviceAccountName", diff --git a/pkg/api/v1/resource/helpers.go b/pkg/api/v1/resource/helpers.go index 081f0f56d21..e97879d2830 100644 --- a/pkg/api/v1/resource/helpers.go +++ b/pkg/api/v1/resource/helpers.go @@ -27,7 +27,7 @@ import ( // PodRequestsAndLimits returns a dictionary of all defined resources summed up for all // containers of the pod. -func PodRequestsAndLimits(pod *v1.Pod) (reqs map[v1.ResourceName]resource.Quantity, limits map[v1.ResourceName]resource.Quantity, err error) { +func PodRequestsAndLimits(pod *v1.Pod) (reqs map[v1.ResourceName]resource.Quantity, limits map[v1.ResourceName]resource.Quantity) { reqs, limits = map[v1.ResourceName]resource.Quantity{}, map[v1.ResourceName]resource.Quantity{} for _, container := range pod.Spec.Containers { for name, quantity := range container.Resources.Requests { diff --git a/pkg/api/validation/OWNERS b/pkg/api/validation/OWNERS index dda6d4862e8..66dfd87d3b4 100755 --- a/pkg/api/validation/OWNERS +++ b/pkg/api/validation/OWNERS @@ -12,7 +12,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - davidopp @@ -26,7 +25,7 @@ reviewers: - justinsb - pwittrock - roberthbailey -- timstclair +- tallclair - eparis - soltysh - piosz @@ -37,4 +36,3 @@ reviewers: - krousey - rootfs - markturansky -- vmarmol diff --git a/pkg/api/validation/validation.go b/pkg/api/validation/validation.go index 99c63f9a382..f6bd510bfdd 
100644 --- a/pkg/api/validation/validation.go +++ b/pkg/api/validation/validation.go @@ -808,7 +808,8 @@ var validDownwardAPIFieldPathExpressions = sets.NewString( "metadata.name", "metadata.namespace", "metadata.labels", - "metadata.annotations") + "metadata.annotations", + "metadata.uid") func validateDownwardAPIVolumeFile(file *api.DownwardAPIVolumeFile, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -1556,7 +1557,7 @@ func ValidateEnv(vars []api.EnvVar, fldPath *field.Path) field.ErrorList { return allErrs } -var validFieldPathExpressionsEnv = sets.NewString("metadata.name", "metadata.namespace", "spec.nodeName", "spec.serviceAccountName", "status.hostIP", "status.podIP") +var validFieldPathExpressionsEnv = sets.NewString("metadata.name", "metadata.namespace", "metadata.uid", "spec.nodeName", "spec.serviceAccountName", "status.hostIP", "status.podIP") var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "requests.cpu", "requests.memory") func validateEnvVarValueFrom(ev api.EnvVar, fldPath *field.Path) field.ErrorList { @@ -1759,6 +1760,9 @@ func ValidateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, fldPath if mountpoints.Has(mnt.MountPath) { allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be unique")) } + if !path.IsAbs(mnt.MountPath) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be an absolute path")) + } mountpoints.Insert(mnt.MountPath) if len(mnt.SubPath) > 0 { allErrs = append(allErrs, validateLocalDescendingPath(mnt.SubPath, fldPath.Child("subPath"))...) 
@@ -2909,6 +2913,22 @@ func ValidateService(service *api.Service) field.ErrorList { nodePorts[key] = true } + // Check for duplicate TargetPort + portsPath = specPath.Child("ports") + targetPorts := make(map[api.ServicePort]bool) + for i, port := range service.Spec.Ports { + if (port.TargetPort.Type == intstr.Int && port.TargetPort.IntVal == 0) || (port.TargetPort.Type == intstr.String && port.TargetPort.StrVal == "") { + continue + } + portPath := portsPath.Index(i) + key := api.ServicePort{Protocol: port.Protocol, TargetPort: port.TargetPort} + _, found := targetPorts[key] + if found { + allErrs = append(allErrs, field.Duplicate(portPath.Child("targetPort"), port.TargetPort)) + } + targetPorts[key] = true + } + // Validate SourceRange field and annotation _, ok := service.Annotations[api.AnnotationLoadBalancerSourceRangesKey] if len(service.Spec.LoadBalancerSourceRanges) > 0 || ok { @@ -4012,13 +4032,11 @@ func validateEndpointSubsets(subsets []api.EndpointSubset, oldSubsets []api.Endp ss := &subsets[i] idxPath := fldPath.Index(i) + // EndpointSubsets must include endpoint address. For headless service, we allow its endpoints not to have ports. if len(ss.Addresses) == 0 && len(ss.NotReadyAddresses) == 0 { //TODO: consider adding a RequiredOneOf() error for this and similar cases allErrs = append(allErrs, field.Required(idxPath, "must specify `addresses` or `notReadyAddresses`")) } - if len(ss.Ports) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("ports"), "")) - } for addr := range ss.Addresses { allErrs = append(allErrs, validateEndpointAddress(&ss.Addresses[addr], idxPath.Child("addresses").Index(addr), ipToNodeName)...) 
} diff --git a/pkg/api/validation/validation_test.go b/pkg/api/validation/validation_test.go index e29e97aaad4..6f0940d03e1 100644 --- a/pkg/api/validation/validation_test.go +++ b/pkg/api/validation/validation_test.go @@ -2584,6 +2584,24 @@ func TestValidateEnv(t *testing.T) { }, }, }, + { + Name: "abc", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: api.Registry.GroupOrDie(api.GroupName).GroupVersion.String(), + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "abc", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: api.Registry.GroupOrDie(api.GroupName).GroupVersion.String(), + FieldPath: "metadata.uid", + }, + }, + }, { Name: "abc", ValueFrom: &api.EnvVarSource{ @@ -2644,7 +2662,7 @@ func TestValidateEnv(t *testing.T) { }, } if errs := ValidateEnv(successCase, field.NewPath("field")); len(errs) != 0 { - t.Errorf("expected success: %v", errs) + t.Errorf("expected success, got: %v", errs) } errorCases := []struct { @@ -2823,7 +2841,7 @@ func TestValidateEnv(t *testing.T) { }, }, }}, - expectedError: `[0].valueFrom.fieldRef.fieldPath: Unsupported value: "metadata.labels": supported values: metadata.name, metadata.namespace, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP`, + expectedError: `[0].valueFrom.fieldRef.fieldPath: Unsupported value: "metadata.labels": supported values: metadata.name, metadata.namespace, metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP`, }, { name: "invalid fieldPath annotations", @@ -2836,7 +2854,7 @@ func TestValidateEnv(t *testing.T) { }, }, }}, - expectedError: `[0].valueFrom.fieldRef.fieldPath: Unsupported value: "metadata.annotations": supported values: metadata.name, metadata.namespace, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP`, + expectedError: `[0].valueFrom.fieldRef.fieldPath: Unsupported value: "metadata.annotations": supported values: metadata.name, 
metadata.namespace, metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP`, }, { name: "unsupported fieldPath", @@ -2849,7 +2867,7 @@ func TestValidateEnv(t *testing.T) { }, }, }}, - expectedError: `valueFrom.fieldRef.fieldPath: Unsupported value: "status.phase": supported values: metadata.name, metadata.namespace, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP`, + expectedError: `valueFrom.fieldRef.fieldPath: Unsupported value: "status.phase": supported values: metadata.name, metadata.namespace, metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP`, }, } for _, tc := range errorCases { @@ -3027,7 +3045,6 @@ func TestValidateVolumeMounts(t *testing.T) { {Name: "abc-123", MountPath: "/bab", SubPath: "baz"}, {Name: "abc-123", MountPath: "/bac", SubPath: ".baz"}, {Name: "abc-123", MountPath: "/bad", SubPath: "..baz"}, - {Name: "abc", MountPath: "c:/foo/bar"}, } if errs := ValidateVolumeMounts(successCase, volumes, field.NewPath("field")); len(errs) != 0 { t.Errorf("expected success: %v", errs) @@ -3037,6 +3054,7 @@ func TestValidateVolumeMounts(t *testing.T) { "empty name": {{Name: "", MountPath: "/foo"}}, "name not found": {{Name: "", MountPath: "/foo"}}, "empty mountpath": {{Name: "abc", MountPath: ""}}, + "relative mountpath": {{Name: "abc", MountPath: "bar"}}, "mountpath collision": {{Name: "foo", MountPath: "/path/a"}, {Name: "bar", MountPath: "/path/a"}}, "absolute subpath": {{Name: "abc", MountPath: "/bar", SubPath: "/baz"}}, "subpath in ..": {{Name: "abc", MountPath: "/bar", SubPath: "../baz"}}, @@ -6284,6 +6302,42 @@ func TestValidateService(t *testing.T) { }, numErrs: 0, }, + { + name: "invalid duplicate targetports (number with same protocol)", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeClusterIP + s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 1, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}) + s.Spec.Ports = append(s.Spec.Ports, 
api.ServicePort{Name: "r", Port: 2, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}) + }, + numErrs: 1, + }, + { + name: "invalid duplicate targetports (name with same protocol)", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeClusterIP + s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 1, Protocol: "TCP", TargetPort: intstr.FromString("http")}) + s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "r", Port: 2, Protocol: "TCP", TargetPort: intstr.FromString("http")}) + }, + numErrs: 1, + }, + { + name: "valid duplicate targetports (number with different protocols)", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeClusterIP + s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 1, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}) + s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "r", Port: 2, Protocol: "UDP", TargetPort: intstr.FromInt(8080)}) + }, + numErrs: 0, + }, + { + name: "valid duplicate targetports (name with different protocols)", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeClusterIP + s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 1, Protocol: "TCP", TargetPort: intstr.FromString("http")}) + s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "r", Port: 2, Protocol: "UDP", TargetPort: intstr.FromString("http")}) + }, + numErrs: 0, + }, { name: "valid type - cluster", tweakSvc: func(s *api.Service) { @@ -9463,6 +9517,14 @@ func TestValidateEndpoints(t *testing.T) { }, }, }, + "empty ports": { + ObjectMeta: metav1.ObjectMeta{Name: "mysvc", Namespace: "namespace"}, + Subsets: []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{IP: "10.10.3.3"}}, + }, + }, + }, } for k, v := range successCases { @@ -9505,17 +9567,6 @@ func TestValidateEndpoints(t *testing.T) { }, errorType: "FieldValueRequired", }, - "empty ports": { - endpoints: api.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: "mysvc", Namespace: 
"namespace"}, - Subsets: []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{IP: "10.10.3.3"}}, - }, - }, - }, - errorType: "FieldValueRequired", - }, "invalid IP": { endpoints: api.Endpoints{ ObjectMeta: metav1.ObjectMeta{Name: "mysvc", Namespace: "namespace"}, diff --git a/pkg/apimachinery/tests/BUILD b/pkg/apimachinery/tests/BUILD index b09fce71a53..20d0a176ce4 100644 --- a/pkg/apimachinery/tests/BUILD +++ b/pkg/apimachinery/tests/BUILD @@ -4,53 +4,20 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", - "go_library", "go_test", ) go_test( name = "go_default_test", - srcs = [ - "api_meta_help_test.go", - "api_meta_meta_test.go", - "api_meta_scheme_test.go", - "apis_meta_v1_unstructed_unstructure_test.go", - "runtime_helper_test.go", - "runtime_serializer_protobuf_protobuf_test.go", - "runtime_unversioned_test.go", - "watch_until_test.go", - ], - library = ":go_default_library", + srcs = ["api_meta_scheme_test.go"], tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", "//pkg/api/testapi:go_default_library", - "//pkg/api/validation:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//vendor/github.com/google/gofuzz:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", ], ) -go_library( - name = "go_default_library", - srcs = ["doc.go"], - tags = ["automanaged"], -) - filegroup( name = "package-srcs", srcs = glob(["**"]), diff --git a/pkg/apis/OWNERS b/pkg/apis/OWNERS index 2554bfcc77c..66878903a22 100644 --- a/pkg/apis/OWNERS +++ b/pkg/apis/OWNERS @@ -16,7 +16,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - pmorie @@ -29,7 +28,7 @@ reviewers: - justinsb - pwittrock - ncdc -- timstclair +- tallclair - yifan-gu - eparis - mwielgus diff --git a/pkg/apis/abac/v0/register.go b/pkg/apis/abac/v0/register.go index c4aa1c09db4..4efcc092961 100644 --- a/pkg/apis/abac/v0/register.go +++ b/pkg/apis/abac/v0/register.go @@ -60,5 +60,3 @@ func addKnownTypes(scheme *runtime.Scheme) error { ) return nil } - -func (obj *Policy) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/pkg/apis/abac/v1beta1/register.go b/pkg/apis/abac/v1beta1/register.go index 1b4f8ed50b8..e157a69aac6 100644 --- a/pkg/apis/abac/v1beta1/register.go +++ b/pkg/apis/abac/v1beta1/register.go @@ -60,5 +60,3 @@ func addKnownTypes(scheme *runtime.Scheme) error { ) return nil } - -func (obj *Policy) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/pkg/apis/apps/OWNERS b/pkg/apis/apps/OWNERS index 1cdc56eec04..e06ff4c481e 100755 --- a/pkg/apis/apps/OWNERS +++ b/pkg/apis/apps/OWNERS @@ -4,12 +4,11 @@ reviewers: - smarterclayton - deads2k - caesarxuchao -- bprashanth - pmorie - sttts - saad-ali - ncdc -- timstclair +- tallclair - timothysc - dims - errordeveloper diff --git a/pkg/apis/componentconfig/OWNERS b/pkg/apis/componentconfig/OWNERS index a644a39fcd7..16e6aa37c33 100755 --- a/pkg/apis/componentconfig/OWNERS +++ b/pkg/apis/componentconfig/OWNERS @@ -11,7 +11,6 @@ reviewers: - mikedanese - liggitt - 
nikhiljindal -- bprashanth - gmarek - sttts - dchen1107 diff --git a/pkg/apis/extensions/OWNERS b/pkg/apis/extensions/OWNERS index 494763a6934..29a40fb54ee 100755 --- a/pkg/apis/extensions/OWNERS +++ b/pkg/apis/extensions/OWNERS @@ -10,7 +10,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - erictune - pmorie - sttts @@ -19,7 +18,7 @@ reviewers: - janetkuo - justinsb - ncdc -- timstclair +- tallclair - mwielgus - timothysc - soltysh diff --git a/pkg/apis/extensions/validation/validation.go b/pkg/apis/extensions/validation/validation.go index 4be86af92dd..ace36544ffa 100644 --- a/pkg/apis/extensions/validation/validation.go +++ b/pkg/apis/extensions/validation/validation.go @@ -40,55 +40,6 @@ import ( psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" ) -func ValidateThirdPartyResourceUpdate(update, old *extensions.ThirdPartyResource) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...) - allErrs = append(allErrs, ValidateThirdPartyResource(update)...) - return allErrs -} - -func ValidateThirdPartyResourceName(name string, prefix bool) []string { - // Make sure it's a valid DNS subdomain - if msgs := apivalidation.NameIsDNSSubdomain(name, prefix); len(msgs) != 0 { - return msgs - } - - // Make sure it's at least three segments (kind + two-segment group name) - if !prefix { - parts := strings.Split(name, ".") - if len(parts) < 3 { - return []string{"must be at least three segments long: .."} - } - } - - return nil -} - -func ValidateThirdPartyResource(obj *extensions.ThirdPartyResource) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&obj.ObjectMeta, false, ValidateThirdPartyResourceName, field.NewPath("metadata"))...) 
- - versions := sets.String{} - if len(obj.Versions) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("versions"), "must specify at least one version")) - } - for ix := range obj.Versions { - version := &obj.Versions[ix] - if len(version.Name) == 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("versions").Index(ix).Child("name"), version, "must not be empty")) - } else { - for _, msg := range validation.IsDNS1123Label(version.Name) { - allErrs = append(allErrs, field.Invalid(field.NewPath("versions").Index(ix).Child("name"), version, msg)) - } - } - if versions.Has(version.Name) { - allErrs = append(allErrs, field.Duplicate(field.NewPath("versions").Index(ix).Child("name"), version)) - } - versions.Insert(version.Name) - } - return allErrs -} - // ValidateDaemonSet tests if required fields in the DaemonSet are set. func ValidateDaemonSet(ds *extensions.DaemonSet) field.ErrorList { allErrs := apivalidation.ValidateObjectMeta(&ds.ObjectMeta, true, ValidateDaemonSetName, field.NewPath("metadata")) @@ -427,14 +378,6 @@ func ValidateDeploymentRollback(obj *extensions.DeploymentRollback) field.ErrorL return allErrs } -func ValidateThirdPartyResourceDataUpdate(update, old *extensions.ThirdPartyResourceData) field.ErrorList { - return apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata")) -} - -func ValidateThirdPartyResourceData(obj *extensions.ThirdPartyResourceData) field.ErrorList { - return apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, apivalidation.NameIsDNSLabel, field.NewPath("metadata")) -} - // ValidateIngress tests if required fields in the Ingress are set. 
func ValidateIngress(ingress *extensions.Ingress) field.ErrorList { allErrs := apivalidation.ValidateObjectMeta(&ingress.ObjectMeta, true, ValidateIngressName, field.NewPath("metadata")) diff --git a/pkg/apis/networking/validation/validation.go b/pkg/apis/networking/validation/validation.go index e2db31430fe..12272d50289 100644 --- a/pkg/apis/networking/validation/validation.go +++ b/pkg/apis/networking/validation/validation.go @@ -17,8 +17,6 @@ limitations under the License. package validation import ( - "reflect" - unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/validation" @@ -92,8 +90,6 @@ func ValidateNetworkPolicy(np *networking.NetworkPolicy) field.ErrorList { func ValidateNetworkPolicyUpdate(update, old *networking.NetworkPolicy) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...) - if !reflect.DeepEqual(update.Spec, old.Spec) { - allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to networkpolicy spec are forbidden.")) - } + allErrs = append(allErrs, ValidateNetworkPolicySpec(&update.Spec, field.NewPath("spec"))...) 
return allErrs } diff --git a/pkg/apis/networking/validation/validation_test.go b/pkg/apis/networking/validation/validation_test.go index 6001d5bc4e6..154d783bf15 100644 --- a/pkg/apis/networking/validation/validation_test.go +++ b/pkg/apis/networking/validation/validation_test.go @@ -271,8 +271,8 @@ func TestValidateNetworkPolicyUpdate(t *testing.T) { old networking.NetworkPolicy update networking.NetworkPolicy } - successCases := []npUpdateTest{ - { + successCases := map[string]npUpdateTest{ + "no change": { old: networking.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}, Spec: networking.NetworkPolicySpec{ @@ -292,32 +292,6 @@ func TestValidateNetworkPolicyUpdate(t *testing.T) { }, }, }, - } - - for _, successCase := range successCases { - successCase.old.ObjectMeta.ResourceVersion = "1" - successCase.update.ObjectMeta.ResourceVersion = "1" - if errs := ValidateNetworkPolicyUpdate(&successCase.update, &successCase.old); len(errs) != 0 { - t.Errorf("expected success: %v", errs) - } - } - errorCases := map[string]npUpdateTest{ - "change name": { - old: networking.NetworkPolicy{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}, - Spec: networking.NetworkPolicySpec{ - PodSelector: metav1.LabelSelector{}, - Ingress: []networking.NetworkPolicyIngressRule{}, - }, - }, - update: networking.NetworkPolicy{ - ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "bar"}, - Spec: networking.NetworkPolicySpec{ - PodSelector: metav1.LabelSelector{}, - Ingress: []networking.NetworkPolicyIngressRule{}, - }, - }, - }, "change spec": { old: networking.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}, @@ -338,7 +312,36 @@ func TestValidateNetworkPolicyUpdate(t *testing.T) { }, } + for testName, successCase := range successCases { + successCase.old.ObjectMeta.ResourceVersion = "1" + successCase.update.ObjectMeta.ResourceVersion = "1" + if errs := ValidateNetworkPolicyUpdate(&successCase.update, &successCase.old); 
len(errs) != 0 { + t.Errorf("expected success (%s): %v", testName, errs) + } + } + + errorCases := map[string]npUpdateTest{ + "change name": { + old: networking.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}, + Spec: networking.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{}, + Ingress: []networking.NetworkPolicyIngressRule{}, + }, + }, + update: networking.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "bar"}, + Spec: networking.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{}, + Ingress: []networking.NetworkPolicyIngressRule{}, + }, + }, + }, + } + for testName, errorCase := range errorCases { + errorCase.old.ObjectMeta.ResourceVersion = "1" + errorCase.update.ObjectMeta.ResourceVersion = "1" if errs := ValidateNetworkPolicyUpdate(&errorCase.update, &errorCase.old); len(errs) == 0 { t.Errorf("expected failure: %s", testName) } diff --git a/pkg/client/OWNERS b/pkg/client/OWNERS index ef75436b640..0f192509905 100644 --- a/pkg/client/OWNERS +++ b/pkg/client/OWNERS @@ -18,7 +18,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - davidopp @@ -33,7 +32,7 @@ reviewers: - justinsb - roberthbailey - ncdc -- timstclair +- tallclair - yifan-gu - eparis - mwielgus diff --git a/pkg/client/clientset_generated/clientset/BUILD b/pkg/client/clientset_generated/clientset/BUILD index c35ae87d0ce..a922681e791 100644 --- a/pkg/client/clientset_generated/clientset/BUILD +++ b/pkg/client/clientset_generated/clientset/BUILD @@ -12,6 +12,7 @@ go_library( srcs = [ "clientset.go", "doc.go", + "import.go", "import_known_versions.go", ], tags = ["automanaged"], diff --git a/pkg/client/clientset_generated/clientset/import.go b/pkg/client/clientset_generated/clientset/import.go new file mode 100644 index 00000000000..0dcf5f0b90b --- /dev/null +++ b/pkg/client/clientset_generated/clientset/import.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file exists to enforce this clientset's vanity import path. + +package clientset // import "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" diff --git a/pkg/client/leaderelection/resourcelock/BUILD b/pkg/client/leaderelectionconfig/BUILD similarity index 58% rename from pkg/client/leaderelection/resourcelock/BUILD rename to pkg/client/leaderelectionconfig/BUILD index cd0985719bd..17dc8e1376f 100644 --- a/pkg/client/leaderelection/resourcelock/BUILD +++ b/pkg/client/leaderelectionconfig/BUILD @@ -9,18 +9,13 @@ load( go_library( name = "go_default_library", - srcs = [ - "configmaplock.go", - "endpointslock.go", - "interface.go", - ], + srcs = ["config.go"], tags = ["automanaged"], deps = [ - "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", + "//pkg/apis/componentconfig:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library", ], ) diff --git a/pkg/client/leaderelectionconfig/config.go b/pkg/client/leaderelectionconfig/config.go new file mode 100644 index 00000000000..626f43229ca --- /dev/null +++ b/pkg/client/leaderelectionconfig/config.go 
@@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package leaderelectionconfig + +import ( + "time" + + "github.com/spf13/pflag" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rl "k8s.io/client-go/tools/leaderelection/resourcelock" + "k8s.io/kubernetes/pkg/apis/componentconfig" +) + +const ( + DefaultLeaseDuration = 15 * time.Second + DefaultRenewDeadline = 10 * time.Second + DefaultRetryPeriod = 2 * time.Second +) + +func DefaultLeaderElectionConfiguration() componentconfig.LeaderElectionConfiguration { + return componentconfig.LeaderElectionConfiguration{ + LeaderElect: false, + LeaseDuration: metav1.Duration{Duration: DefaultLeaseDuration}, + RenewDeadline: metav1.Duration{Duration: DefaultRenewDeadline}, + RetryPeriod: metav1.Duration{Duration: DefaultRetryPeriod}, + ResourceLock: rl.EndpointsResourceLock, + } +} + +// BindFlags binds the common LeaderElectionCLIConfig flags to a flagset +func BindFlags(l *componentconfig.LeaderElectionConfiguration, fs *pflag.FlagSet) { + fs.BoolVar(&l.LeaderElect, "leader-elect", l.LeaderElect, ""+ + "Start a leader election client and gain leadership before "+ + "executing the main loop. 
Enable this when running replicated "+ "components for high availability.") + fs.DurationVar(&l.LeaseDuration.Duration, "leader-elect-lease-duration", l.LeaseDuration.Duration, ""+ + "The duration that non-leader candidates will wait after observing a leadership "+ + "renewal until attempting to acquire leadership of a led but unrenewed leader "+ + "slot. This is effectively the maximum duration that a leader can be stopped "+ + "before it is replaced by another candidate. This is only applicable if leader "+ + "election is enabled.") + fs.DurationVar(&l.RenewDeadline.Duration, "leader-elect-renew-deadline", l.RenewDeadline.Duration, ""+ + "The interval between attempts by the acting master to renew a leadership slot "+ + "before it stops leading. This must be less than or equal to the lease duration. "+ + "This is only applicable if leader election is enabled.") + fs.DurationVar(&l.RetryPeriod.Duration, "leader-elect-retry-period", l.RetryPeriod.Duration, ""+ + "The duration the clients should wait between attempting acquisition and renewal "+ + "of a leadership. This is only applicable if leader election is enabled.") + fs.StringVar(&l.ResourceLock, "leader-elect-resource-lock", l.ResourceLock, ""+ + "The type of resource object that is used for locking during "+ + "leader election. 
Supported options are `endpoints` (default) and `configmap`.") +} diff --git a/pkg/client/tests/BUILD b/pkg/client/tests/BUILD index 70e2af35c29..555019ffdee 100644 --- a/pkg/client/tests/BUILD +++ b/pkg/client/tests/BUILD @@ -40,6 +40,7 @@ go_test( "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/tools/portforward:go_default_library", "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + "//vendor/k8s.io/client-go/transport/spdy:go_default_library", "//vendor/k8s.io/client-go/util/testing:go_default_library", ], ) diff --git a/pkg/client/tests/portfoward_test.go b/pkg/client/tests/portfoward_test.go index 35d633f8008..d6122606deb 100644 --- a/pkg/client/tests/portfoward_test.go +++ b/pkg/client/tests/portfoward_test.go @@ -33,7 +33,7 @@ import ( "k8s.io/apimachinery/pkg/types" restclient "k8s.io/client-go/rest" . "k8s.io/client-go/tools/portforward" - "k8s.io/client-go/tools/remotecommand" + "k8s.io/client-go/transport/spdy" "k8s.io/kubernetes/pkg/kubelet/server/portforward" ) @@ -131,16 +131,17 @@ func TestForwardPorts(t *testing.T) { for testName, test := range tests { server := httptest.NewServer(fakePortForwardServer(t, testName, test.serverSends, test.clientSends)) - url, _ := url.Parse(server.URL) - exec, err := remotecommand.NewExecutor(&restclient.Config{}, "POST", url) + transport, upgrader, err := spdy.RoundTripperFor(&restclient.Config{}) if err != nil { t.Fatal(err) } + url, _ := url.Parse(server.URL) + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", url) stopChan := make(chan struct{}, 1) readyChan := make(chan struct{}) - pf, err := New(exec, test.ports, stopChan, readyChan, os.Stdout, os.Stderr) + pf, err := New(dialer, test.ports, stopChan, readyChan, os.Stdout, os.Stderr) if err != nil { t.Fatalf("%s: unexpected error calling New: %v", testName, err) } @@ -201,17 +202,18 @@ func TestForwardPortsReturnsErrorWhenAllBindsFailed(t *testing.T) { server := 
httptest.NewServer(fakePortForwardServer(t, "allBindsFailed", nil, nil)) defer server.Close() - url, _ := url.Parse(server.URL) - exec, err := remotecommand.NewExecutor(&restclient.Config{}, "POST", url) + transport, upgrader, err := spdy.RoundTripperFor(&restclient.Config{}) if err != nil { t.Fatal(err) } + url, _ := url.Parse(server.URL) + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", url) stopChan1 := make(chan struct{}, 1) defer close(stopChan1) readyChan1 := make(chan struct{}) - pf1, err := New(exec, []string{"5555"}, stopChan1, readyChan1, os.Stdout, os.Stderr) + pf1, err := New(dialer, []string{"5555"}, stopChan1, readyChan1, os.Stdout, os.Stderr) if err != nil { t.Fatalf("error creating pf1: %v", err) } @@ -220,7 +222,7 @@ func TestForwardPortsReturnsErrorWhenAllBindsFailed(t *testing.T) { stopChan2 := make(chan struct{}, 1) readyChan2 := make(chan struct{}) - pf2, err := New(exec, []string{"5555"}, stopChan2, readyChan2, os.Stdout, os.Stderr) + pf2, err := New(dialer, []string{"5555"}, stopChan2, readyChan2, os.Stdout, os.Stderr) if err != nil { t.Fatalf("error creating pf2: %v", err) } diff --git a/pkg/client/tests/remotecommand_test.go b/pkg/client/tests/remotecommand_test.go index cd89b81e0fa..6ea8a96ecb1 100644 --- a/pkg/client/tests/remotecommand_test.go +++ b/pkg/client/tests/remotecommand_test.go @@ -37,6 +37,7 @@ import ( remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand" restclient "k8s.io/client-go/rest" remoteclient "k8s.io/client-go/tools/remotecommand" + "k8s.io/client-go/transport/spdy" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/kubelet/server/remotecommand" @@ -124,7 +125,7 @@ func fakeServer(t *testing.T, testName string, exec bool, stdinData, stdoutData, opts, err := remotecommand.NewOptions(req) require.NoError(t, err) if exec { - cmd := req.URL.Query()[api.ExecCommandParamm] + cmd := req.URL.Query()[api.ExecCommandParam] remotecommand.ServeExec(w, 
req, executor, "pod", "uid", "container", cmd, opts, 0, 10*time.Second, serverProtocols) } else { remotecommand.ServeAttach(w, req, executor, "pod", "uid", "container", opts, 0, 10*time.Second, serverProtocols) @@ -255,17 +256,16 @@ func TestStream(t *testing.T) { conf := &restclient.Config{ Host: server.URL, } - e, err := remoteclient.NewExecutor(conf, "POST", req.URL()) + e, err := remoteclient.NewSPDYExecutorForProtocols(conf, "POST", req.URL(), testCase.ClientProtocols...) if err != nil { t.Errorf("%s: unexpected error: %v", name, err) continue } err = e.Stream(remoteclient.StreamOptions{ - SupportedProtocols: testCase.ClientProtocols, - Stdin: streamIn, - Stdout: streamOut, - Stderr: streamErr, - Tty: testCase.Tty, + Stdin: streamIn, + Stdout: streamOut, + Stderr: streamErr, + Tty: testCase.Tty, }) hasErr := err != nil @@ -311,11 +311,13 @@ type fakeUpgrader struct { conn httpstream.Connection err, connErr error checkResponse bool + called bool t *testing.T } func (u *fakeUpgrader) RoundTrip(req *http.Request) (*http.Response, error) { + u.called = true u.req = req return u.resp, u.err } @@ -344,27 +346,16 @@ func TestDial(t *testing.T) { Body: ioutil.NopCloser(&bytes.Buffer{}), }, } - var called bool - testFn := func(rt http.RoundTripper) http.RoundTripper { - if rt != upgrader { - t.Fatalf("unexpected round tripper: %#v", rt) - } - called = true - return rt - } - exec, err := remoteclient.NewStreamExecutor(upgrader, testFn, "POST", &url.URL{Host: "something.com", Scheme: "https"}) - if err != nil { - t.Fatal(err) - } - conn, protocol, err := exec.Dial("protocol1") + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: upgrader}, "POST", &url.URL{Host: "something.com", Scheme: "https"}) + conn, protocol, err := dialer.Dial("protocol1") if err != nil { t.Fatal(err) } if conn != upgrader.conn { t.Errorf("unexpected connection: %#v", conn) } - if !called { - t.Errorf("wrapper not called") + if !upgrader.called { + t.Errorf("request not called") } _ = 
protocol } diff --git a/pkg/client/unversioned/OWNERS b/pkg/client/unversioned/OWNERS index 5ed1fb6e27a..33d109321c4 100755 --- a/pkg/client/unversioned/OWNERS +++ b/pkg/client/unversioned/OWNERS @@ -10,7 +10,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - ixdy - gmarek - erictune @@ -22,7 +21,7 @@ reviewers: - zmerlynn - janetkuo - ncdc -- timstclair +- tallclair - mwielgus - timothysc - feiskyer diff --git a/pkg/cloudprovider/OWNERS b/pkg/cloudprovider/OWNERS index 657fe57964b..5abbe51cb2c 100644 --- a/pkg/cloudprovider/OWNERS +++ b/pkg/cloudprovider/OWNERS @@ -12,7 +12,6 @@ reviewers: - vishh - mikedanese - liggitt -- bprashanth - gmarek - erictune - davidopp @@ -34,7 +33,6 @@ reviewers: - rootfs - jszczepkowski - markturansky -- vmarmol - girishkalele - satnam6502 - jdef diff --git a/pkg/cloudprovider/providers/BUILD b/pkg/cloudprovider/providers/BUILD index 0bf53f7bb27..3842437c84a 100644 --- a/pkg/cloudprovider/providers/BUILD +++ b/pkg/cloudprovider/providers/BUILD @@ -16,7 +16,6 @@ go_library( "//pkg/cloudprovider/providers/azure:go_default_library", "//pkg/cloudprovider/providers/cloudstack:go_default_library", "//pkg/cloudprovider/providers/gce:go_default_library", - "//pkg/cloudprovider/providers/mesos:go_default_library", "//pkg/cloudprovider/providers/openstack:go_default_library", "//pkg/cloudprovider/providers/ovirt:go_default_library", "//pkg/cloudprovider/providers/photon:go_default_library", @@ -41,7 +40,6 @@ filegroup( "//pkg/cloudprovider/providers/cloudstack:all-srcs", "//pkg/cloudprovider/providers/fake:all-srcs", "//pkg/cloudprovider/providers/gce:all-srcs", - "//pkg/cloudprovider/providers/mesos:all-srcs", "//pkg/cloudprovider/providers/openstack:all-srcs", "//pkg/cloudprovider/providers/ovirt:all-srcs", "//pkg/cloudprovider/providers/photon:all-srcs", diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go index d96692adeb9..40256006f6c 100644 --- 
a/pkg/cloudprovider/providers/aws/aws.go +++ b/pkg/cloudprovider/providers/aws/aws.go @@ -604,7 +604,7 @@ func (s *awsSdkEC2) DescribeInstances(request *ec2.DescribeInstancesInput) ([]*e response, err := s.ec2.DescribeInstances(request) if err != nil { recordAwsMetric("describe_instance", 0, err) - return nil, fmt.Errorf("error listing AWS instances: %v", err) + return nil, fmt.Errorf("error listing AWS instances: %q", err) } for _, reservation := range response.Reservations { @@ -627,7 +627,7 @@ func (s *awsSdkEC2) DescribeSecurityGroups(request *ec2.DescribeSecurityGroupsIn // Security groups are not paged response, err := s.ec2.DescribeSecurityGroups(request) if err != nil { - return nil, fmt.Errorf("error listing AWS security groups: %v", err) + return nil, fmt.Errorf("error listing AWS security groups: %q", err) } return response.SecurityGroups, nil } @@ -658,7 +658,7 @@ func (s *awsSdkEC2) DescribeVolumes(request *ec2.DescribeVolumesInput) ([]*ec2.V if err != nil { recordAwsMetric("describe_volume", 0, err) - return nil, fmt.Errorf("error listing AWS volumes: %v", err) + return nil, fmt.Errorf("error listing AWS volumes: %q", err) } results = append(results, response.Volumes...) 
@@ -694,7 +694,7 @@ func (s *awsSdkEC2) DescribeSubnets(request *ec2.DescribeSubnetsInput) ([]*ec2.S // Subnets are not paged response, err := s.ec2.DescribeSubnets(request) if err != nil { - return nil, fmt.Errorf("error listing AWS subnets: %v", err) + return nil, fmt.Errorf("error listing AWS subnets: %q", err) } return response.Subnets, nil } @@ -727,7 +727,7 @@ func (s *awsSdkEC2) DescribeRouteTables(request *ec2.DescribeRouteTablesInput) ( // Not paged response, err := s.ec2.DescribeRouteTables(request) if err != nil { - return nil, fmt.Errorf("error listing AWS route tables: %v", err) + return nil, fmt.Errorf("error listing AWS route tables: %q", err) } return response.RouteTables, nil } @@ -816,7 +816,7 @@ func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) { metadata, err := awsServices.Metadata() if err != nil { - return nil, fmt.Errorf("error creating AWS metadata client: %v", err) + return nil, fmt.Errorf("error creating AWS metadata client: %q", err) } cfg, err := readAWSCloudConfig(config, metadata) @@ -963,7 +963,7 @@ func (c *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) { internalIP, err := c.metadata.GetMetadata("local-ipv4") if err != nil { - return nil, fmt.Errorf("error querying AWS metadata for %q: %v", "local-ipv4", err) + return nil, fmt.Errorf("error querying AWS metadata for %q: %q", "local-ipv4", err) } addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalIP, Address: internalIP}) @@ -999,7 +999,7 @@ func (c *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) { instance, err := c.getInstanceByNodeName(name) if err != nil { - return nil, fmt.Errorf("getInstanceByNodeName failed for %q with %v", name, err) + return nil, fmt.Errorf("getInstanceByNodeName failed for %q with %q", name, err) } return extractNodeAddresses(instance) } @@ -1090,7 +1090,7 @@ func (c *Cloud) InstanceID(nodeName types.NodeName) (string, error) { } inst, err := c.getInstanceByNodeName(nodeName) 
if err != nil { - return "", fmt.Errorf("getInstanceByNodeName failed for %q with %v", nodeName, err) + return "", fmt.Errorf("getInstanceByNodeName failed for %q with %q", nodeName, err) } return "/" + orEmpty(inst.Placement.AvailabilityZone) + "/" + orEmpty(inst.InstanceId), nil } @@ -1119,7 +1119,7 @@ func (c *Cloud) InstanceType(nodeName types.NodeName) (string, error) { } inst, err := c.getInstanceByNodeName(nodeName) if err != nil { - return "", fmt.Errorf("getInstanceByNodeName failed for %q with %v", nodeName, err) + return "", fmt.Errorf("getInstanceByNodeName failed for %q with %q", nodeName, err) } return aws.StringValue(inst.InstanceType), nil } @@ -1376,7 +1376,7 @@ func (d *awsDisk) describeVolume() (*ec2.Volume, error) { volumes, err := d.ec2.DescribeVolumes(request) if err != nil { - return nil, fmt.Errorf("error querying ec2 for volume %q: %v", volumeID, err) + return nil, fmt.Errorf("error querying ec2 for volume %q: %q", volumeID, err) } if len(volumes) == 0 { return nil, fmt.Errorf("no volumes found for volume %q", volumeID) @@ -1462,7 +1462,7 @@ func (d *awsDisk) deleteVolume() (bool, error) { return false, volume.NewDeletedVolumeInUseError(err.Error()) } } - return false, fmt.Errorf("error deleting EBS volume %q: %v", d.awsID, err) + return false, fmt.Errorf("error deleting EBS volume %q: %q", d.awsID, err) } return true, nil } @@ -1475,7 +1475,7 @@ func (c *Cloud) buildSelfAWSInstance() (*awsInstance, error) { } instanceID, err := c.metadata.GetMetadata("instance-id") if err != nil { - return nil, fmt.Errorf("error fetching instance-id from ec2 metadata service: %v", err) + return nil, fmt.Errorf("error fetching instance-id from ec2 metadata service: %q", err) } // We want to fetch the hostname via the EC2 metadata service @@ -1488,7 +1488,7 @@ func (c *Cloud) buildSelfAWSInstance() (*awsInstance, error) { // have two code paths. 
instance, err := c.getInstanceByID(instanceID) if err != nil { - return nil, fmt.Errorf("error finding instance %s: %v", instanceID, err) + return nil, fmt.Errorf("error finding instance %s: %q", instanceID, err) } return newAWSInstance(c.ec2, instance), nil } @@ -1517,19 +1517,19 @@ func wrapAttachError(err error, disk *awsDisk, instance string) error { if awsError.Code() == "VolumeInUse" { info, err := disk.describeVolume() if err != nil { - glog.Errorf("Error describing volume %q: %v", disk.awsID, err) + glog.Errorf("Error describing volume %q: %q", disk.awsID, err) } else { for _, a := range info.Attachments { if disk.awsID != awsVolumeID(aws.StringValue(a.VolumeId)) { glog.Warningf("Expected to get attachment info of volume %q but instead got info of %q", disk.awsID, aws.StringValue(a.VolumeId)) } else if aws.StringValue(a.State) == "attached" { - return fmt.Errorf("Error attaching EBS volume %q to instance %q: %v. The volume is currently attached to instance %q", disk.awsID, instance, awsError, aws.StringValue(a.InstanceId)) + return fmt.Errorf("Error attaching EBS volume %q to instance %q: %q. 
The volume is currently attached to instance %q", disk.awsID, instance, awsError, aws.StringValue(a.InstanceId)) } } } } } - return fmt.Errorf("Error attaching EBS volume %q to instance %q: %v", disk.awsID, instance, err) + return fmt.Errorf("Error attaching EBS volume %q to instance %q: %q", disk.awsID, instance, err) } // AttachDisk implements Volumes.AttachDisk @@ -1541,7 +1541,7 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName, awsInstance, info, err := c.getFullInstance(nodeName) if err != nil { - return "", fmt.Errorf("error finding instance %s: %v", nodeName, err) + return "", fmt.Errorf("error finding instance %s: %q", nodeName, err) } if readOnly { @@ -1659,7 +1659,7 @@ func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) response, err := c.ec2.DetachVolume(&request) if err != nil { - return "", fmt.Errorf("error detaching EBS volume %q from %q: %v", disk.awsID, awsInstance.awsID, err) + return "", fmt.Errorf("error detaching EBS volume %q from %q: %q", disk.awsID, awsInstance.awsID, err) } if response == nil { return "", errors.New("no response from DetachVolume") @@ -1771,9 +1771,9 @@ func (c *Cloud) CreateDisk(volumeOptions *VolumeOptions) (KubernetesVolumeID, er _, delerr := c.DeleteDisk(volumeName) if delerr != nil { // delete did not succeed, we have a stray volume! 
- return "", fmt.Errorf("error tagging volume %s, could not delete the volume: %v", volumeName, delerr) + return "", fmt.Errorf("error tagging volume %s, could not delete the volume: %q", volumeName, delerr) } - return "", fmt.Errorf("error tagging volume %s: %v", volumeName, err) + return "", fmt.Errorf("error tagging volume %s: %q", volumeName, err) } return volumeName, nil @@ -1959,7 +1959,7 @@ func (c *Cloud) describeLoadBalancer(name string) (*elb.LoadBalancerDescription, func (c *Cloud) findVPCID() (string, error) { macs, err := c.metadata.GetMetadata("network/interfaces/macs/") if err != nil { - return "", fmt.Errorf("Could not list interfaces of the instance: %v", err) + return "", fmt.Errorf("Could not list interfaces of the instance: %q", err) } // loop over interfaces, first vpc id returned wins @@ -2088,7 +2088,7 @@ func isEqualUserGroupPair(l, r *ec2.UserIdGroupPair, compareGroupUserIDs bool) b func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPermissionSet) (bool, error) { group, err := c.findSecurityGroup(securityGroupID) if err != nil { - glog.Warning("Error retrieving security group", err) + glog.Warningf("Error retrieving security group %q", err) return false, err } @@ -2134,7 +2134,7 @@ func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPe request.IpPermissions = add.List() _, err = c.ec2.AuthorizeSecurityGroupIngress(request) if err != nil { - return false, fmt.Errorf("error authorizing security group ingress: %v", err) + return false, fmt.Errorf("error authorizing security group ingress: %q", err) } } if remove.Len() != 0 { @@ -2145,7 +2145,7 @@ func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPe request.IpPermissions = remove.List() _, err = c.ec2.RevokeSecurityGroupIngress(request) if err != nil { - return false, fmt.Errorf("error revoking security group ingress: %v", err) + return false, fmt.Errorf("error revoking security group ingress: %q", err) } } @@ 
-2158,7 +2158,7 @@ func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPe func (c *Cloud) addSecurityGroupIngress(securityGroupID string, addPermissions []*ec2.IpPermission) (bool, error) { group, err := c.findSecurityGroup(securityGroupID) if err != nil { - glog.Warningf("Error retrieving security group: %v", err) + glog.Warningf("Error retrieving security group: %q", err) return false, err } @@ -2201,8 +2201,8 @@ func (c *Cloud) addSecurityGroupIngress(securityGroupID string, addPermissions [ request.IpPermissions = changes _, err = c.ec2.AuthorizeSecurityGroupIngress(request) if err != nil { - glog.Warning("Error authorizing security group ingress", err) - return false, fmt.Errorf("error authorizing security group ingress: %v", err) + glog.Warningf("Error authorizing security group ingress %q", err) + return false, fmt.Errorf("error authorizing security group ingress: %q", err) } return true, nil @@ -2214,7 +2214,7 @@ func (c *Cloud) addSecurityGroupIngress(securityGroupID string, addPermissions [ func (c *Cloud) removeSecurityGroupIngress(securityGroupID string, removePermissions []*ec2.IpPermission) (bool, error) { group, err := c.findSecurityGroup(securityGroupID) if err != nil { - glog.Warningf("Error retrieving security group: %v", err) + glog.Warningf("Error retrieving security group: %q", err) return false, err } @@ -2256,7 +2256,7 @@ func (c *Cloud) removeSecurityGroupIngress(securityGroupID string, removePermiss request.IpPermissions = changes _, err = c.ec2.RevokeSecurityGroupIngress(request) if err != nil { - glog.Warningf("Error revoking security group ingress: %v", err) + glog.Warningf("Error revoking security group ingress: %q", err) return false, err } @@ -2319,7 +2319,7 @@ func (c *Cloud) ensureSecurityGroup(name string, description string) (string, er } } if !ignore { - glog.Error("Error creating security group: ", err) + glog.Errorf("Error creating security group: %q", err) return "", err } time.Sleep(1 * time.Second) 
@@ -2338,7 +2338,7 @@ func (c *Cloud) ensureSecurityGroup(name string, description string) (string, er // will add the missing tags. We could delete the security // group here, but that doesn't feel like the right thing, as // the caller is likely to retry the create - return "", fmt.Errorf("error tagging security group: %v", err) + return "", fmt.Errorf("error tagging security group: %q", err) } return groupID, nil } @@ -2363,7 +2363,7 @@ func (c *Cloud) findSubnets() ([]*ec2.Subnet, error) { subnets, err := c.ec2.DescribeSubnets(request) if err != nil { - return nil, fmt.Errorf("error describing subnets: %v", err) + return nil, fmt.Errorf("error describing subnets: %q", err) } var matches []*ec2.Subnet @@ -2386,7 +2386,7 @@ func (c *Cloud) findSubnets() ([]*ec2.Subnet, error) { subnets, err = c.ec2.DescribeSubnets(request) if err != nil { - return nil, fmt.Errorf("error describing subnets: %v", err) + return nil, fmt.Errorf("error describing subnets: %q", err) } return subnets, nil @@ -2407,7 +2407,7 @@ func (c *Cloud) findELBSubnets(internalELB bool) ([]string, error) { rRequest.Filters = []*ec2.Filter{vpcIDFilter} rt, err := c.ec2.DescribeRouteTables(rRequest) if err != nil { - return nil, fmt.Errorf("error describe route table: %v", err) + return nil, fmt.Errorf("error describe route table: %q", err) } subnetsByAZ := make(map[string]*ec2.Subnet) @@ -2553,7 +2553,7 @@ func (c *Cloud) buildELBSecurityGroupList(serviceName types.NamespacedName, load sgDescription := fmt.Sprintf("Security group for Kubernetes ELB %s (%v)", loadBalancerName, serviceName) securityGroupID, err = c.ensureSecurityGroup(sgName, sgDescription) if err != nil { - glog.Error("Error creating load balancer security group: ", err) + glog.Errorf("Error creating load balancer security group: %q", err) return nil, err } } @@ -2770,7 +2770,7 @@ func (c *Cloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, n // Find the subnets that the ELB will live in subnetIDs, err := 
c.findELBSubnets(internalELB) if err != nil { - glog.Error("Error listing subnets in VPC: ", err) + glog.Errorf("Error listing subnets in VPC: %q", err) return nil, err } @@ -2846,7 +2846,7 @@ func (c *Cloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, n glog.V(4).Infof("service %v (%v) needs health checks on :%d%s)", apiService.Name, loadBalancerName, healthCheckNodePort, path) err = c.ensureLoadBalancerHealthCheck(loadBalancer, "HTTP", healthCheckNodePort, path) if err != nil { - return nil, fmt.Errorf("Failed to ensure health check for localized service %v on node port %v: %v", loadBalancerName, healthCheckNodePort, err) + return nil, fmt.Errorf("Failed to ensure health check for localized service %v on node port %v: %q", loadBalancerName, healthCheckNodePort, err) } } else { glog.V(4).Infof("service %v does not need custom health checks", apiService.Name) @@ -2868,13 +2868,13 @@ func (c *Cloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, n err = c.updateInstanceSecurityGroupsForLoadBalancer(loadBalancer, instances) if err != nil { - glog.Warningf("Error opening ingress rules for the load balancer to the instances: %v", err) + glog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err) return nil, err } err = c.ensureLoadBalancerInstances(orEmpty(loadBalancer.LoadBalancerName), loadBalancer.Instances, instances) if err != nil { - glog.Warningf("Error registering instances with the load balancer: %v", err) + glog.Warningf("Error registering instances with the load balancer: %q", err) return nil, err } @@ -2964,7 +2964,7 @@ func (c *Cloud) getTaggedSecurityGroups() (map[string]*ec2.SecurityGroup, error) request.Filters = c.tagging.addFilters(nil) groups, err := c.ec2.DescribeSecurityGroups(request) if err != nil { - return nil, fmt.Errorf("error querying security groups: %v", err) + return nil, fmt.Errorf("error querying security groups: %q", err) } m := make(map[string]*ec2.SecurityGroup) @@ 
-3016,7 +3016,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer describeRequest.Filters = c.tagging.addFilters(filters) response, err := c.ec2.DescribeSecurityGroups(describeRequest) if err != nil { - return fmt.Errorf("error querying security groups for ELB: %v", err) + return fmt.Errorf("error querying security groups for ELB: %q", err) } for _, sg := range response { if !c.tagging.hasClusterTag(sg.Tags) { @@ -3028,7 +3028,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer taggedSecurityGroups, err := c.getTaggedSecurityGroups() if err != nil { - return fmt.Errorf("error querying for tagged security groups: %v", err) + return fmt.Errorf("error querying for tagged security groups: %q", err) } // Open the firewall from the load balancer to the instance @@ -3133,7 +3133,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Servic // De-authorize the load balancer security group from the instances security group err = c.updateInstanceSecurityGroupsForLoadBalancer(lb, nil) if err != nil { - glog.Error("Error deregistering load balancer from instance security groups: ", err) + glog.Errorf("Error deregistering load balancer from instance security groups: %q", err) return err } } @@ -3146,7 +3146,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Servic _, err = c.elb.DeleteLoadBalancer(request) if err != nil { // TODO: Check if error was because load balancer was concurrently deleted - glog.Error("Error deleting load balancer: ", err) + glog.Errorf("Error deleting load balancer: %q", err) return err } } @@ -3188,7 +3188,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Servic } } if !ignore { - return fmt.Errorf("error while deleting load balancer security group (%s): %v", securityGroupID, err) + return fmt.Errorf("error while deleting load balancer security group (%s): %q", securityGroupID, err) } } } diff --git 
a/pkg/cloudprovider/providers/aws/aws_instancegroups.go b/pkg/cloudprovider/providers/aws/aws_instancegroups.go index af3ff541a0a..6bf63ae6fe6 100644 --- a/pkg/cloudprovider/providers/aws/aws_instancegroups.go +++ b/pkg/cloudprovider/providers/aws/aws_instancegroups.go @@ -37,7 +37,7 @@ func ResizeInstanceGroup(asg ASG, instanceGroupName string, size int) error { MaxSize: aws.Int64(int64(size)), } if _, err := asg.UpdateAutoScalingGroup(request); err != nil { - return fmt.Errorf("error resizing AWS autoscaling group: %v", err) + return fmt.Errorf("error resizing AWS autoscaling group: %q", err) } return nil } @@ -57,7 +57,7 @@ func DescribeInstanceGroup(asg ASG, instanceGroupName string) (InstanceGroupInfo } response, err := asg.DescribeAutoScalingGroups(request) if err != nil { - return nil, fmt.Errorf("error listing AWS autoscaling group (%s): %v", instanceGroupName, err) + return nil, fmt.Errorf("error listing AWS autoscaling group (%s): %q", instanceGroupName, err) } if len(response.AutoScalingGroups) == 0 { diff --git a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go index d08403d5785..0a6504ae661 100644 --- a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go +++ b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go @@ -139,7 +139,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala glog.V(2).Info("Detaching load balancer from removed subnets") _, err := c.elb.DetachLoadBalancerFromSubnets(request) if err != nil { - return nil, fmt.Errorf("error detaching AWS loadbalancer from subnets: %v", err) + return nil, fmt.Errorf("error detaching AWS loadbalancer from subnets: %q", err) } dirty = true } @@ -151,7 +151,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala glog.V(2).Info("Attaching load balancer to added subnets") _, err := c.elb.AttachLoadBalancerToSubnets(request) if err != nil { - return nil, fmt.Errorf("error attaching AWS 
loadbalancer to subnets: %v", err) + return nil, fmt.Errorf("error attaching AWS loadbalancer to subnets: %q", err) } dirty = true } @@ -170,7 +170,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala glog.V(2).Info("Applying updated security groups to load balancer") _, err := c.elb.ApplySecurityGroupsToLoadBalancer(request) if err != nil { - return nil, fmt.Errorf("error applying AWS loadbalancer security groups: %v", err) + return nil, fmt.Errorf("error applying AWS loadbalancer security groups: %q", err) } dirty = true } @@ -230,7 +230,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala glog.V(2).Info("Deleting removed load balancer listeners") _, err := c.elb.DeleteLoadBalancerListeners(request) if err != nil { - return nil, fmt.Errorf("error deleting AWS loadbalancer listeners: %v", err) + return nil, fmt.Errorf("error deleting AWS loadbalancer listeners: %q", err) } dirty = true } @@ -242,7 +242,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala glog.V(2).Info("Creating added load balancer listeners") _, err := c.elb.CreateLoadBalancerListeners(request) if err != nil { - return nil, fmt.Errorf("error creating AWS loadbalancer listeners: %v", err) + return nil, fmt.Errorf("error creating AWS loadbalancer listeners: %q", err) } dirty = true } @@ -339,7 +339,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala modifyAttributesRequest.LoadBalancerAttributes = loadBalancerAttributes _, err = c.elb.ModifyLoadBalancerAttributes(modifyAttributesRequest) if err != nil { - return nil, fmt.Errorf("Unable to update load balancer attributes during attribute sync: %v", err) + return nil, fmt.Errorf("Unable to update load balancer attributes during attribute sync: %q", err) } dirty = true } @@ -411,7 +411,7 @@ func (c *Cloud) ensureLoadBalancerHealthCheck(loadBalancer *elb.LoadBalancerDesc _, err := c.elb.ConfigureHealthCheck(request) if 
err != nil { - return fmt.Errorf("error configuring load-balancer health-check for %q: %v", name, err) + return fmt.Errorf("error configuring load-balancer health-check for %q: %q", name, err) } return nil @@ -486,7 +486,7 @@ func (c *Cloud) createProxyProtocolPolicy(loadBalancerName string) error { glog.V(2).Info("Creating proxy protocol policy on load balancer") _, err := c.elb.CreateLoadBalancerPolicy(request) if err != nil { - return fmt.Errorf("error creating proxy protocol policy on load balancer: %v", err) + return fmt.Errorf("error creating proxy protocol policy on load balancer: %q", err) } return nil @@ -505,7 +505,7 @@ func (c *Cloud) setBackendPolicies(loadBalancerName string, instancePort int64, } _, err := c.elb.SetLoadBalancerPoliciesForBackendServer(request) if err != nil { - return fmt.Errorf("error adjusting AWS loadbalancer backend policies: %v", err) + return fmt.Errorf("error adjusting AWS loadbalancer backend policies: %q", err) } return nil diff --git a/pkg/cloudprovider/providers/aws/aws_routes.go b/pkg/cloudprovider/providers/aws/aws_routes.go index f24220d78fa..c2563ace17b 100644 --- a/pkg/cloudprovider/providers/aws/aws_routes.go +++ b/pkg/cloudprovider/providers/aws/aws_routes.go @@ -131,7 +131,7 @@ func (c *Cloud) configureInstanceSourceDestCheck(instanceID string, sourceDestCh _, err := c.ec2.ModifyInstanceAttribute(request) if err != nil { - return fmt.Errorf("error configuring source-dest-check on instance %s: %v", instanceID, err) + return fmt.Errorf("error configuring source-dest-check on instance %s: %q", instanceID, err) } return nil } @@ -178,7 +178,7 @@ func (c *Cloud) CreateRoute(clusterName string, nameHint string, route *cloudpro _, err = c.ec2.DeleteRoute(request) if err != nil { - return fmt.Errorf("error deleting blackholed AWS route (%s): %v", aws.StringValue(deleteRoute.DestinationCidrBlock), err) + return fmt.Errorf("error deleting blackholed AWS route (%s): %q", aws.StringValue(deleteRoute.DestinationCidrBlock), err) 
} } @@ -190,7 +190,7 @@ func (c *Cloud) CreateRoute(clusterName string, nameHint string, route *cloudpro _, err = c.ec2.CreateRoute(request) if err != nil { - return fmt.Errorf("error creating AWS route (%s): %v", route.DestinationCIDR, err) + return fmt.Errorf("error creating AWS route (%s): %q", route.DestinationCIDR, err) } return nil @@ -210,7 +210,7 @@ func (c *Cloud) DeleteRoute(clusterName string, route *cloudprovider.Route) erro _, err = c.ec2.DeleteRoute(request) if err != nil { - return fmt.Errorf("error deleting AWS route (%s): %v", route.DestinationCIDR, err) + return fmt.Errorf("error deleting AWS route (%s): %q", route.DestinationCIDR, err) } return nil diff --git a/pkg/cloudprovider/providers/aws/tags.go b/pkg/cloudprovider/providers/aws/tags.go index ef89d17266b..2b0ff8c2c7c 100644 --- a/pkg/cloudprovider/providers/aws/tags.go +++ b/pkg/cloudprovider/providers/aws/tags.go @@ -179,7 +179,7 @@ func (c *awsTagging) readRepairClusterTags(client EC2, resourceID string, lifecy } if err := c.createTags(client, resourceID, lifecycle, addTags); err != nil { - return fmt.Errorf("error adding missing tags to resource %q: %v", resourceID, err) + return fmt.Errorf("error adding missing tags to resource %q: %q", resourceID, err) } return nil @@ -222,7 +222,7 @@ func (t *awsTagging) createTags(client EC2, resourceID string, lifecycle Resourc // We could check that the error is retryable, but the error code changes based on what we are tagging // SecurityGroup: InvalidGroup.NotFound - glog.V(2).Infof("Failed to create tags; will retry. Error was %v", err) + glog.V(2).Infof("Failed to create tags; will retry. 
Error was %q", err) lastErr = err return false, nil }) diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index be58887a2fd..2dc4b7b20e8 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -13,17 +13,19 @@ go_library( srcs = [ "azure.go", "azure_backoff.go", - "azure_blob.go", + "azure_blobDiskController.go", + "azure_controllerCommon.go", "azure_file.go", + "azure_instance_metadata.go", "azure_instances.go", "azure_loadbalancer.go", + "azure_managedDiskController.go", "azure_routes.go", "azure_storage.go", "azure_storageaccount.go", "azure_util.go", "azure_wrap.go", "azure_zones.go", - "vhd.go", ], tags = ["automanaged"], deps = [ @@ -33,15 +35,18 @@ go_library( "//pkg/version:go_default_library", "//pkg/volume:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/arm/disk:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library", + "//vendor/golang.org/x/crypto/pkcs12:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", diff --git a/pkg/cloudprovider/providers/azure/azure.go 
b/pkg/cloudprovider/providers/azure/azure.go index ca2e48b7e6c..8ae82707ded 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -17,6 +17,8 @@ limitations under the License. package azure import ( + "crypto/rsa" + "crypto/x509" "fmt" "io" "io/ioutil" @@ -28,12 +30,15 @@ import ( "k8s.io/kubernetes/pkg/version" "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/Azure/azure-sdk-for-go/arm/disk" "github.com/Azure/azure-sdk-for-go/arm/network" "github.com/Azure/azure-sdk-for-go/arm/storage" "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" "github.com/ghodss/yaml" "github.com/golang/glog" + "golang.org/x/crypto/pkcs12" "k8s.io/apimachinery/pkg/util/wait" ) @@ -80,6 +85,10 @@ type Config struct { AADClientID string `json:"aadClientId" yaml:"aadClientId"` // The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs AADClientSecret string `json:"aadClientSecret" yaml:"aadClientSecret"` + // The path of a client certificate for an AAD application with RBAC access to talk to Azure RM APIs + AADClientCertPath string `json:"aadClientCertPath" yaml:"aadClientCertPath"` + // The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs + AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"` // Enable exponential backoff to manage resource request retries CloudProviderBackoff bool `json:"cloudProviderBackoff" yaml:"cloudProviderBackoff"` // Backoff retry limit @@ -96,6 +105,12 @@ type Config struct { CloudProviderRateLimitQPS float32 `json:"cloudProviderRateLimitQPS" yaml:"cloudProviderRateLimitQPS"` // Rate limit Bucket Size CloudProviderRateLimitBucket int `json:"cloudProviderRateLimitBucket" yaml:"cloudProviderRateLimitBucket"` + + // Use instance metadata service where possible + UseInstanceMetadata bool 
`json:"useInstanceMetadata" yaml:"useInstanceMetadata"` + + // Use managed service identity for the virtual machine to access Azure ARM APIs + UseManagedIdentityExtension bool `json:"useManagedIdentityExtension"` } // Cloud holds the config and clients @@ -111,100 +126,149 @@ type Cloud struct { SecurityGroupsClient network.SecurityGroupsClient VirtualMachinesClient compute.VirtualMachinesClient StorageAccountClient storage.AccountsClient + DisksClient disk.DisksClient operationPollRateLimiter flowcontrol.RateLimiter resourceRequestBackoff wait.Backoff + + *BlobDiskController + *ManagedDiskController + *controllerCommon } func init() { cloudprovider.RegisterCloudProvider(CloudProviderName, NewCloud) } +// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and +// the private RSA key +func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + privateKey, certificate, err := pkcs12.Decode(pkcs, password) + if err != nil { + return nil, nil, fmt.Errorf("decoding the PKCS#12 client certificate: %v", err) + } + rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) + if !isRsaKey { + return nil, nil, fmt.Errorf("PKCS#12 certificate must contain a RSA private key") + } + + return certificate, rsaPrivateKey, nil +} + +// GetServicePrincipalToken creates a new service principal token based on the configuration +func GetServicePrincipalToken(config *Config, env *azure.Environment) (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID) + if err != nil { + return nil, fmt.Errorf("creating the OAuth config: %v", err) + } + + if config.UseManagedIdentityExtension { + glog.V(2).Infoln("azure: using managed identity extension to retrieve access token") + return adal.NewServicePrincipalTokenFromMSI( + *oauthConfig, + env.ServiceManagementEndpoint) + } + + if len(config.AADClientSecret) > 0 { + glog.V(2).Infoln("azure: using 
client_id+client_secret to retrieve access token") + return adal.NewServicePrincipalToken( + *oauthConfig, + config.AADClientID, + config.AADClientSecret, + env.ServiceManagementEndpoint) + } + + if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 { + glog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token") + certData, err := ioutil.ReadFile(config.AADClientCertPath) + if err != nil { + return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err) + } + certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword) + if err != nil { + return nil, fmt.Errorf("decoding the client certificate: %v", err) + } + return adal.NewServicePrincipalTokenFromCertificate( + *oauthConfig, + config.AADClientID, + certificate, + privateKey, + env.ServiceManagementEndpoint) + } + + return nil, fmt.Errorf("No credentials provided for AAD application %s", config.AADClientID) +} + // NewCloud returns a Cloud with initialized clients func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { - var az Cloud - - configContents, err := ioutil.ReadAll(configReader) + config, env, err := ParseConfig(configReader) if err != nil { return nil, err } - err = yaml.Unmarshal(configContents, &az) - if err != nil { - return nil, err + az := Cloud{ + Config: *config, + Environment: *env, } - if az.Cloud == "" { - az.Environment = azure.PublicCloud - } else { - az.Environment, err = azure.EnvironmentFromName(az.Cloud) - if err != nil { - return nil, err - } - } - - oauthConfig, err := az.Environment.OAuthConfigForTenant(az.TenantID) - if err != nil { - return nil, err - } - - servicePrincipalToken, err := azure.NewServicePrincipalToken( - *oauthConfig, - az.AADClientID, - az.AADClientSecret, - az.Environment.ServiceManagementEndpoint) + servicePrincipalToken, err := GetServicePrincipalToken(config, env) if err != nil { return nil, err } 
az.SubnetsClient = network.NewSubnetsClient(az.SubscriptionID) az.SubnetsClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.SubnetsClient.Authorizer = servicePrincipalToken + az.SubnetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) az.SubnetsClient.PollingDelay = 5 * time.Second configureUserAgent(&az.SubnetsClient.Client) az.RouteTablesClient = network.NewRouteTablesClient(az.SubscriptionID) az.RouteTablesClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.RouteTablesClient.Authorizer = servicePrincipalToken + az.RouteTablesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) az.RouteTablesClient.PollingDelay = 5 * time.Second configureUserAgent(&az.RouteTablesClient.Client) az.RoutesClient = network.NewRoutesClient(az.SubscriptionID) az.RoutesClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.RoutesClient.Authorizer = servicePrincipalToken + az.RoutesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) az.RoutesClient.PollingDelay = 5 * time.Second configureUserAgent(&az.RoutesClient.Client) az.InterfacesClient = network.NewInterfacesClient(az.SubscriptionID) az.InterfacesClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.InterfacesClient.Authorizer = servicePrincipalToken + az.InterfacesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) az.InterfacesClient.PollingDelay = 5 * time.Second configureUserAgent(&az.InterfacesClient.Client) az.LoadBalancerClient = network.NewLoadBalancersClient(az.SubscriptionID) az.LoadBalancerClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.LoadBalancerClient.Authorizer = servicePrincipalToken + az.LoadBalancerClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) az.LoadBalancerClient.PollingDelay = 5 * time.Second configureUserAgent(&az.LoadBalancerClient.Client) az.VirtualMachinesClient = compute.NewVirtualMachinesClient(az.SubscriptionID) az.VirtualMachinesClient.BaseURI = 
az.Environment.ResourceManagerEndpoint - az.VirtualMachinesClient.Authorizer = servicePrincipalToken + az.VirtualMachinesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) az.VirtualMachinesClient.PollingDelay = 5 * time.Second configureUserAgent(&az.VirtualMachinesClient.Client) az.PublicIPAddressesClient = network.NewPublicIPAddressesClient(az.SubscriptionID) az.PublicIPAddressesClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.PublicIPAddressesClient.Authorizer = servicePrincipalToken + az.PublicIPAddressesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) az.PublicIPAddressesClient.PollingDelay = 5 * time.Second configureUserAgent(&az.PublicIPAddressesClient.Client) az.SecurityGroupsClient = network.NewSecurityGroupsClient(az.SubscriptionID) az.SecurityGroupsClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.SecurityGroupsClient.Authorizer = servicePrincipalToken + az.SecurityGroupsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) az.SecurityGroupsClient.PollingDelay = 5 * time.Second configureUserAgent(&az.SecurityGroupsClient.Client) az.StorageAccountClient = storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) - az.StorageAccountClient.Authorizer = servicePrincipalToken + az.StorageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + configureUserAgent(&az.StorageAccountClient.Client) + + az.DisksClient = disk.NewDisksClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) + az.DisksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + configureUserAgent(&az.DisksClient.Client) // Conditionally configure rate limits if az.CloudProviderRateLimit { @@ -254,9 +318,37 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { az.CloudProviderBackoffJitter) } + if err := initDiskControllers(&az); err != nil { + return nil, err + } return &az, nil } +// 
ParseConfig returns a parsed configuration and azure.Environment for an Azure cloudprovider config file +func ParseConfig(configReader io.Reader) (*Config, *azure.Environment, error) { + var config Config + + configContents, err := ioutil.ReadAll(configReader) + if err != nil { + return nil, nil, err + } + err = yaml.Unmarshal(configContents, &config) + if err != nil { + return nil, nil, err + } + + var env azure.Environment + if config.Cloud == "" { + env = azure.PublicCloud + } else { + env, err = azure.EnvironmentFromName(config.Cloud) + if err != nil { + return nil, nil, err + } + } + return &config, &env, nil +} + // Initialize passes a Kubernetes clientBuilder interface to the cloud provider func (az *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) {} @@ -303,3 +395,42 @@ func configureUserAgent(client *autorest.Client) { k8sVersion := version.Get().GitVersion client.UserAgent = fmt.Sprintf("%s; kubernetes-cloudprovider/%s", client.UserAgent, k8sVersion) } + +func initDiskControllers(az *Cloud) error { + // Common controller contains the function + // needed by both blob disk and managed disk controllers + + common := &controllerCommon{ + aadResourceEndPoint: az.Environment.ServiceManagementEndpoint, + clientID: az.AADClientID, + clientSecret: az.AADClientSecret, + location: az.Location, + storageEndpointSuffix: az.Environment.StorageEndpointSuffix, + managementEndpoint: az.Environment.ResourceManagerEndpoint, + resourceGroup: az.ResourceGroup, + tenantID: az.TenantID, + tokenEndPoint: az.Environment.ActiveDirectoryEndpoint, + subscriptionID: az.SubscriptionID, + cloud: az, + } + + // BlobDiskController: contains the function needed to + // create/attach/detach/delete blob based (unmanaged disks) + blobController, err := newBlobDiskController(common) + if err != nil { + return fmt.Errorf("AzureDisk - failed to init Blob Disk Controller with error (%s)", err.Error()) + } + + // ManagedDiskController: contains the functions needed to + // 
create/attach/detach/delete managed disks + managedController, err := newManagedDiskController(common) + if err != nil { + return fmt.Errorf("AzureDisk - failed to init Managed Disk Controller with error (%s)", err.Error()) + } + + az.BlobDiskController = blobController + az.ManagedDiskController = managedController + az.controllerCommon = common + + return nil +} diff --git a/pkg/cloudprovider/providers/azure/azure_backoff.go b/pkg/cloudprovider/providers/azure/azure_backoff.go index 3fca4c49334..839592f3035 100644 --- a/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -47,8 +47,10 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil) - return processRetryResponse(resp, err) + respChan, errChan := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil) + resp := <-respChan + err := <-errChan + return processRetryResponse(resp.Response, err) }) } @@ -56,8 +58,10 @@ func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error { func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil) - return processRetryResponse(resp, err) + respChan, errChan := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil) + resp := <-respChan + err := <-errChan + return processRetryResponse(resp.Response, err) }) } @@ -65,8 +69,10 @@ func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error { func (az *Cloud) 
CreateOrUpdatePIPWithRetry(pip network.PublicIPAddress) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.PublicIPAddressesClient.CreateOrUpdate(az.ResourceGroup, *pip.Name, pip, nil) - return processRetryResponse(resp, err) + respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(az.ResourceGroup, *pip.Name, pip, nil) + resp := <-respChan + err := <-errChan + return processRetryResponse(resp.Response, err) }) } @@ -74,8 +80,10 @@ func (az *Cloud) CreateOrUpdatePIPWithRetry(pip network.PublicIPAddress) error { func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil) - return processRetryResponse(resp, err) + respChan, errChan := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil) + resp := <-respChan + err := <-errChan + return processRetryResponse(resp.Response, err) }) } @@ -83,7 +91,9 @@ func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error { func (az *Cloud) DeletePublicIPWithRetry(pipName string) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.PublicIPAddressesClient.Delete(az.ResourceGroup, pipName, nil) + respChan, errChan := az.PublicIPAddressesClient.Delete(az.ResourceGroup, pipName, nil) + resp := <-respChan + err := <-errChan return processRetryResponse(resp, err) }) } @@ -92,7 +102,9 @@ func (az *Cloud) DeletePublicIPWithRetry(pipName string) error { func (az *Cloud) DeleteLBWithRetry(lbName string) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.LoadBalancerClient.Delete(az.ResourceGroup, 
lbName, nil) + respChan, errChan := az.LoadBalancerClient.Delete(az.ResourceGroup, lbName, nil) + resp := <-respChan + err := <-errChan return processRetryResponse(resp, err) }) } @@ -101,8 +113,10 @@ func (az *Cloud) DeleteLBWithRetry(lbName string) error { func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil) - return processRetryResponse(resp, err) + respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil) + resp := <-respChan + err := <-errChan + return processRetryResponse(resp.Response, err) }) } @@ -110,8 +124,10 @@ func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil) - return processRetryResponse(resp, err) + respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil) + resp := <-respChan + err := <-errChan + return processRetryResponse(resp.Response, err) }) } @@ -119,7 +135,9 @@ func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error { func (az *Cloud) DeleteRouteWithRetry(routeName string) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil) + respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil) + resp := <-respChan + err := <-errChan return 
processRetryResponse(resp, err) }) } @@ -128,8 +146,10 @@ func (az *Cloud) DeleteRouteWithRetry(routeName string) error { func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualMachine) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil) - return processRetryResponse(resp, err) + respChan, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil) + resp := <-respChan + err := <-errChan + return processRetryResponse(resp.Response, err) }) } diff --git a/pkg/cloudprovider/providers/azure/azure_blob.go b/pkg/cloudprovider/providers/azure/azure_blob.go deleted file mode 100644 index 47d1edd130f..00000000000 --- a/pkg/cloudprovider/providers/azure/azure_blob.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package azure - -import ( - "fmt" - "regexp" - "strings" - - azs "github.com/Azure/azure-sdk-for-go/storage" -) - -const ( - vhdContainerName = "vhds" - useHTTPS = true - blobServiceName = "blob" -) - -// create page blob -func (az *Cloud) createVhdBlob(accountName, accountKey, name string, sizeGB int64, tags map[string]string) (string, string, error) { - blobClient, err := az.getBlobClient(accountName, accountKey) - if err != nil { - return "", "", err - } - size := 1024 * 1024 * 1024 * sizeGB - vhdSize := size + vhdHeaderSize /* header size */ - // Blob name in URL must end with '.vhd' extension. - name = name + ".vhd" - err = blobClient.PutPageBlob(vhdContainerName, name, vhdSize, tags) - if err != nil { - // if container doesn't exist, create one and retry PutPageBlob - detail := err.Error() - if strings.Contains(detail, errContainerNotFound) { - err = blobClient.CreateContainer(vhdContainerName, azs.ContainerAccessTypePrivate) - if err == nil { - err = blobClient.PutPageBlob(vhdContainerName, name, vhdSize, tags) - } - } - } - if err != nil { - return "", "", fmt.Errorf("failed to put page blob: %v", err) - } - - // add VHD signature to the blob - h, err := createVHDHeader(uint64(size)) - if err != nil { - az.deleteVhdBlob(accountName, accountKey, name) - return "", "", fmt.Errorf("failed to create vhd header, err: %v", err) - } - if err = blobClient.PutPage(vhdContainerName, name, size, vhdSize-1, azs.PageWriteTypeUpdate, h[:vhdHeaderSize], nil); err != nil { - az.deleteVhdBlob(accountName, accountKey, name) - return "", "", fmt.Errorf("failed to update vhd header, err: %v", err) - } - - scheme := "http" - if useHTTPS { - scheme = "https" - } - host := fmt.Sprintf("%s://%s.%s.%s", scheme, accountName, blobServiceName, az.Environment.StorageEndpointSuffix) - uri := fmt.Sprintf("%s/%s/%s", host, vhdContainerName, name) - return name, uri, nil - -} - -// delete a vhd blob -func (az *Cloud) deleteVhdBlob(accountName, accountKey, blobName string) error { - 
blobClient, err := az.getBlobClient(accountName, accountKey) - if err == nil { - return blobClient.DeleteBlob(vhdContainerName, blobName, nil) - } - return err -} - -func (az *Cloud) getBlobClient(accountName, accountKey string) (*azs.BlobStorageClient, error) { - client, err := azs.NewClient(accountName, accountKey, az.Environment.StorageEndpointSuffix, azs.DefaultAPIVersion, useHTTPS) - if err != nil { - return nil, fmt.Errorf("error creating azure client: %v", err) - } - b := client.GetBlobService() - return &b, nil -} - -// get uri https://foo.blob.core.windows.net/vhds/bar.vhd and return foo (account) and bar.vhd (blob name) -func (az *Cloud) getBlobNameAndAccountFromURI(uri string) (string, string, error) { - scheme := "http" - if useHTTPS { - scheme = "https" - } - host := fmt.Sprintf("%s://(.*).%s.%s", scheme, blobServiceName, az.Environment.StorageEndpointSuffix) - reStr := fmt.Sprintf("%s/%s/(.*)", host, vhdContainerName) - re := regexp.MustCompile(reStr) - res := re.FindSubmatch([]byte(uri)) - if len(res) < 3 { - return "", "", fmt.Errorf("invalid vhd URI for regex %s: %s", reStr, uri) - } - return string(res[1]), string(res[2]), nil -} diff --git a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go new file mode 100644 index 00000000000..037c4941ef2 --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go @@ -0,0 +1,808 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "bytes" + "encoding/binary" + "fmt" + "math" + "net/url" + "os" + "regexp" + "sync" + + "strconv" + "strings" + "sync/atomic" + "time" + + storage "github.com/Azure/azure-sdk-for-go/arm/storage" + azstorage "github.com/Azure/azure-sdk-for-go/storage" + "github.com/Azure/go-autorest/autorest/to" + "github.com/golang/glog" + "github.com/rubiojr/go-vhd/vhd" + kwait "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/kubernetes/pkg/volume" +) + +const ( + vhdContainerName = "vhds" + useHTTPSForBlobBasedDisk = true + blobServiceName = "blob" +) + +type storageAccountState struct { + name string + saType storage.SkuName + key string + diskCount int32 + isValidating int32 + defaultContainerCreated bool +} + +//BlobDiskController : blob disk controller struct +type BlobDiskController struct { + common *controllerCommon + accounts map[string]*storageAccountState +} + +var defaultContainerName = "" +var storageAccountNamePrefix = "" +var storageAccountNameMatch = "" +var initFlag int64 + +var accountsLock = &sync.Mutex{} + +func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error) { + c := BlobDiskController{common: common} + err := c.init() + + if err != nil { + return nil, err + } + + return &c, nil +} + +// CreateVolume creates a VHD blob in a given storage account, will create the given storage account if it does not exist in current resource group +func (c *BlobDiskController) CreateVolume(name, storageAccount string, storageAccountType storage.SkuName, location string, requestGB int) (string, string, int, error) { + key, err := c.common.cloud.getStorageAccesskey(storageAccount) + if err != nil { + glog.V(2).Infof("azureDisk - no key found for storage account %s in resource group %s, begin to create a new storage account", storageAccount, c.common.resourceGroup) + + cp := storage.AccountCreateParameters{ + Sku: 
&storage.Sku{Name: storageAccountType}, + Tags: &map[string]*string{"created-by": to.StringPtr("azure-dd")}, + Location: &location} + cancel := make(chan struct{}) + + _, errchan := c.common.cloud.StorageAccountClient.Create(c.common.resourceGroup, storageAccount, cp, cancel) + err = <-errchan + if err != nil { + return "", "", 0, fmt.Errorf(fmt.Sprintf("Create Storage Account %s, error: %s", storageAccount, err)) + } + + key, err = c.common.cloud.getStorageAccesskey(storageAccount) + if err != nil { + return "", "", 0, fmt.Errorf("no key found for storage account %s even after creating a new storage account", storageAccount) + } + + glog.Errorf("no key found for storage account %s in resource group %s", storageAccount, c.common.resourceGroup) + return "", "", 0, err + } + + client, err := azstorage.NewBasicClient(storageAccount, key) + if err != nil { + return "", "", 0, err + } + blobClient := client.GetBlobService() + + container := blobClient.GetContainerReference(vhdContainerName) + _, err = container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate}) + if err != nil { + return "", "", 0, err + } + + diskName, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccount, name, vhdContainerName, int64(requestGB)) + if err != nil { + return "", "", 0, err + } + + glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI) + return diskName, diskURI, requestGB, err +} + +// DeleteVolume deletes a VHD blob +func (c *BlobDiskController) DeleteVolume(diskURI string) error { + glog.V(4).Infof("azureDisk - begin to delete volume %s", diskURI) + accountName, blob, err := c.common.cloud.getBlobNameAndAccountFromURI(diskURI) + if err != nil { + return fmt.Errorf("failed to parse vhd URI %v", err) + } + key, err := c.common.cloud.getStorageAccesskey(accountName) + if err != nil { + return fmt.Errorf("no key for storage account %s, err %v", accountName, err) + } + err = c.common.cloud.deleteVhdBlob(accountName, key, 
blob) + if err != nil { + glog.Warningf("azureDisk - failed to delete blob %s err: %v", diskURI, err) + detail := err.Error() + if strings.Contains(detail, errLeaseIDMissing) { + // disk is still being used + // see https://msdn.microsoft.com/en-us/library/microsoft.windowsazure.storage.blob.protocol.bloberrorcodestrings.leaseidmissing.aspx + return volume.NewDeletedVolumeInUseError(fmt.Sprintf("disk %q is still in use while being deleted", diskURI)) + } + return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", diskURI, accountName, blob, err) + } + glog.V(4).Infof("azureDisk - blob %s deleted", diskURI) + return nil + +} + +// get diskURI https://foo.blob.core.windows.net/vhds/bar.vhd and return foo (account) and bar.vhd (blob name) +func (c *BlobDiskController) getBlobNameAndAccountFromURI(diskURI string) (string, string, error) { + scheme := "http" + if useHTTPSForBlobBasedDisk { + scheme = "https" + } + host := fmt.Sprintf("%s://(.*).%s.%s", scheme, blobServiceName, c.common.storageEndpointSuffix) + reStr := fmt.Sprintf("%s/%s/(.*)", host, vhdContainerName) + re := regexp.MustCompile(reStr) + res := re.FindSubmatch([]byte(diskURI)) + if len(res) < 3 { + return "", "", fmt.Errorf("invalid vhd URI for regex %s: %s", reStr, diskURI) + } + return string(res[1]), string(res[2]), nil +} + +func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageClient, accountName, vhdName, containerName string, sizeGB int64) (string, string, error) { + container := blobClient.GetContainerReference(containerName) + _, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate}) + if err != nil { + return "", "", err + } + + size := 1024 * 1024 * 1024 * sizeGB + vhdSize := size + vhd.VHD_HEADER_SIZE /* header size */ + // Blob name in URL must end with '.vhd' extension. 
+ vhdName = vhdName + ".vhd" + + tags := make(map[string]string) + tags["createdby"] = "k8sAzureDataDisk" + glog.V(4).Infof("azureDisk - creating page blob %name in container %s account %s", vhdName, containerName, accountName) + + blob := container.GetBlobReference(vhdName) + blob.Properties.ContentLength = vhdSize + blob.Metadata = tags + err = blob.PutPageBlob(nil) + if err != nil { + return "", "", fmt.Errorf("failed to put page blob %s in container %s: %v", vhdName, containerName, err) + } + + // add VHD signature to the blob + h, err := createVHDHeader(uint64(size)) + if err != nil { + blob.DeleteIfExists(nil) + return "", "", fmt.Errorf("failed to create vhd header, err: %v", err) + } + + blobRange := azstorage.BlobRange{ + Start: uint64(size), + End: uint64(vhdSize - 1), + } + if err = blob.WriteRange(blobRange, bytes.NewBuffer(h[:vhd.VHD_HEADER_SIZE]), nil); err != nil { + glog.Infof("azureDisk - failed to put header page for data disk %s in container %s account %s, error was %s\n", + vhdName, containerName, accountName, err.Error()) + return "", "", err + } + + scheme := "http" + if useHTTPSForBlobBasedDisk { + scheme = "https" + } + + host := fmt.Sprintf("%s://%s.%s.%s", scheme, accountName, blobServiceName, c.common.storageEndpointSuffix) + uri := fmt.Sprintf("%s/%s/%s", host, containerName, vhdName) + return vhdName, uri, nil +} + +// delete a vhd blob +func (c *BlobDiskController) deleteVhdBlob(accountName, accountKey, blobName string) error { + client, err := azstorage.NewBasicClient(accountName, accountKey) + if err != nil { + return err + } + blobSvc := client.GetBlobService() + + container := blobSvc.GetContainerReference(vhdContainerName) + blob := container.GetBlobReference(blobName) + return blob.Delete(nil) +} + +//CreateBlobDisk : create a blob disk in a node +func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int, forceStandAlone bool) (string, error) { + glog.V(4).Infof("azureDisk - 
creating blob data disk named:%s on StorageAccountType:%s StandAlone:%v", dataDiskName, storageAccountType, forceStandAlone) + + var storageAccountName = "" + var err error + + if forceStandAlone { + // we have to wait until the storage account is is created + storageAccountName = "p" + MakeCRC32(c.common.subscriptionID+c.common.resourceGroup+dataDiskName) + err = c.createStorageAccount(storageAccountName, storageAccountType, c.common.location, false) + if err != nil { + return "", err + } + } else { + storageAccountName, err = c.findSANameForDisk(storageAccountType) + if err != nil { + return "", err + } + } + + blobClient, err := c.getBlobSvcClient(storageAccountName) + if err != nil { + return "", err + } + + _, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccountName, dataDiskName, defaultContainerName, int64(sizeGB)) + if err != nil { + return "", err + } + + if !forceStandAlone { + atomic.AddInt32(&c.accounts[storageAccountName].diskCount, 1) + } + + return diskURI, nil +} + +//DeleteBlobDisk : delete a blob disk from a node +func (c *BlobDiskController) DeleteBlobDisk(diskURI string, wasForced bool) error { + storageAccountName, vhdName, err := diskNameandSANameFromURI(diskURI) + if err != nil { + return err + } + + _, ok := c.accounts[storageAccountName] + if !ok { + // the storage account is specified by user + glog.V(4).Infof("azureDisk - deleting volume %s", diskURI) + return c.DeleteVolume(diskURI) + } + // if forced (as in one disk = one storage account) + // delete the account completely + if wasForced { + return c.deleteStorageAccount(storageAccountName) + } + + blobSvc, err := c.getBlobSvcClient(storageAccountName) + if err != nil { + return err + } + + glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, defaultContainerName) + + container := blobSvc.GetContainerReference(defaultContainerName) + blob := container.GetBlobReference(vhdName) + _, err = 
blob.DeleteIfExists(nil) + + if c.accounts[storageAccountName].diskCount == -1 { + if diskCount, err := c.getDiskCount(storageAccountName); err != nil { + c.accounts[storageAccountName].diskCount = int32(diskCount) + } else { + glog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName) + return nil // we have failed to aquire a new count. not an error condition + } + } + atomic.AddInt32(&c.accounts[storageAccountName].diskCount, -1) + return err +} + +// Init tries best effort to ensure that 2 accounts standard/premium were created +// to be used by shared blob disks. This to increase the speed pvc provisioning (in most of cases) +func (c *BlobDiskController) init() error { + if !c.shouldInit() { + return nil + } + + c.setUniqueStrings() + + // get accounts + accounts, err := c.getAllStorageAccounts() + if err != nil { + return err + } + c.accounts = accounts + + if len(c.accounts) == 0 { + counter := 1 + for counter <= storageAccountsCountInit { + + accountType := storage.PremiumLRS + if n := math.Mod(float64(counter), 2); n == 0 { + accountType = storage.StandardLRS + } + + // We don't really care if these calls failed + // at this stage, we are trying to ensure 2 accounts (Standard/Premium) + // are there ready for PVC creation + + // if we failed here, the accounts will be created in the process + // of creating PVC + + // nor do we care if they were partially created, as the entire + // account creation process is idempotent + go func(thisNext int) { + newAccountName := getAccountNameForNum(thisNext) + + glog.Infof("azureDisk - BlobDiskController init process will create new storageAccount:%s type:%s", newAccountName, accountType) + err := c.createStorageAccount(newAccountName, accountType, c.common.location, true) + // TODO return created and error from + if err != nil { + glog.Infof("azureDisk - BlobDiskController init: create account %s with error:%s", newAccountName, err.Error()) + + } else { + 
glog.Infof("azureDisk - BlobDiskController init: created account %s", newAccountName) + } + }(counter) + counter = counter + 1 + } + } + + return nil +} + +//Sets unique strings to be used as accountnames && || blob containers names +func (c *BlobDiskController) setUniqueStrings() { + uniqueString := c.common.resourceGroup + c.common.location + c.common.subscriptionID + hash := MakeCRC32(uniqueString) + //used to generate a unqie container name used by this cluster PVC + defaultContainerName = hash + + storageAccountNamePrefix = fmt.Sprintf(storageAccountNameTemplate, hash) + // Used to filter relevant accounts (accounts used by shared PVC) + storageAccountNameMatch = storageAccountNamePrefix + // Used as a template to create new names for relevant accounts + storageAccountNamePrefix = storageAccountNamePrefix + "%s" +} +func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error) { + if account, exists := c.accounts[SAName]; exists && account.key != "" { + return c.accounts[SAName].key, nil + } + listKeysResult, err := c.common.cloud.StorageAccountClient.ListKeys(c.common.resourceGroup, SAName) + if err != nil { + return "", err + } + if listKeysResult.Keys == nil { + return "", fmt.Errorf("azureDisk - empty listKeysResult in storage account:%s keys", SAName) + } + for _, v := range *listKeysResult.Keys { + if v.Value != nil && *v.Value == "key1" { + if _, ok := c.accounts[SAName]; !ok { + glog.Warningf("azureDisk - account %s was not cached while getting keys", SAName) + return *v.Value, nil + } + } + + c.accounts[SAName].key = *v.Value + return c.accounts[SAName].key, nil + } + + return "", fmt.Errorf("couldn't find key named key1 in storage account:%s keys", SAName) +} + +func (c *BlobDiskController) getBlobSvcClient(SAName string) (azstorage.BlobStorageClient, error) { + key := "" + var client azstorage.Client + var blobSvc azstorage.BlobStorageClient + var err error + if key, err = c.getStorageAccountKey(SAName); err != nil { + return 
blobSvc, err + } + + if client, err = azstorage.NewBasicClient(SAName, key); err != nil { + return blobSvc, err + } + + blobSvc = client.GetBlobService() + return blobSvc, nil +} + +func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) error { + var err error + var blobSvc azstorage.BlobStorageClient + + // short circut the check via local cache + // we are forgiving the fact that account may not be in cache yet + if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated { + return nil + } + + // not cached, check existance and readiness + bExist, provisionState, _ := c.getStorageAccountState(storageAccountName) + + // account does not exist + if !bExist { + return fmt.Errorf("azureDisk - account %s does not exist while trying to create/ensure default container", storageAccountName) + } + + // account exists but not ready yet + if provisionState != storage.Succeeded { + // we don't want many attempts to validate the account readiness + // here hence we are locking + counter := 1 + for swapped := atomic.CompareAndSwapInt32(&c.accounts[storageAccountName].isValidating, 0, 1); swapped != true; { + time.Sleep(3 * time.Second) + counter = counter + 1 + // check if we passed the max sleep + if counter >= 20 { + return fmt.Errorf("azureDisk - timeout waiting to aquire lock to validate account:%s readiness", storageAccountName) + } + } + + // swapped + defer func() { + c.accounts[storageAccountName].isValidating = 0 + }() + + // short circut the check again. 
+ if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated { + return nil + } + + err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) { + _, provisionState, err := c.getStorageAccountState(storageAccountName) + + if err != nil { + glog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error()) + return false, err + } + + if provisionState == storage.Succeeded { + return true, nil + } + + glog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet", storageAccountName) + // leave it for next loop/sync loop + return false, fmt.Errorf("azureDisk - Account %s has not been flagged Succeeded by ARM", storageAccountName) + }) + // we have failed to ensure that account is ready for us to create + // the default vhd container + if err != nil { + return err + } + } + + if blobSvc, err = c.getBlobSvcClient(storageAccountName); err != nil { + return err + } + + container := blobSvc.GetContainerReference(defaultContainerName) + bCreated, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate}) + if err != nil { + return err + } + if bCreated { + glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, defaultContainerName) + } + + // flag so we no longer have to check on ARM + c.accounts[storageAccountName].defaultContainerCreated = true + return nil +} + +// Gets Disk counts per storage account +func (c *BlobDiskController) getDiskCount(SAName string) (int, error) { + // if we have it in cache + if c.accounts[SAName].diskCount != -1 { + return int(c.accounts[SAName].diskCount), nil + } + + var err error + var blobSvc azstorage.BlobStorageClient + + if err = c.ensureDefaultContainer(SAName); err != nil { + return 0, err + } + + if blobSvc, err = c.getBlobSvcClient(SAName); err != nil { + return 0, err + } + params := azstorage.ListBlobsParameters{} + + container := 
blobSvc.GetContainerReference(defaultContainerName) + response, err := container.ListBlobs(params) + if err != nil { + return 0, err + } + glog.V(4).Infof("azure-Disk - refreshed data count for account %s and found %v", SAName, len(response.Blobs)) + c.accounts[SAName].diskCount = int32(len(response.Blobs)) + + return int(c.accounts[SAName].diskCount), nil +} + +// shouldInit ensures that we only init the plugin once +// and we only do that in the controller + +func (c *BlobDiskController) shouldInit() bool { + if os.Args[0] == "kube-controller-manager" || (os.Args[0] == "/hyperkube" && os.Args[1] == "controller-manager") { + swapped := atomic.CompareAndSwapInt64(&initFlag, 0, 1) + if swapped { + return true + } + } + return false +} + +func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccountState, error) { + accountListResult, err := c.common.cloud.StorageAccountClient.List() + if err != nil { + return nil, err + } + if accountListResult.Value == nil { + return nil, fmt.Errorf("azureDisk - empty accountListResult") + } + + accounts := make(map[string]*storageAccountState) + for _, v := range *accountListResult.Value { + if strings.Index(*v.Name, storageAccountNameMatch) != 0 { + continue + } + if v.Name == nil || v.Sku == nil { + glog.Infof("azureDisk - accountListResult Name or Sku is nil") + continue + } + glog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name) + + sastate := &storageAccountState{ + name: *v.Name, + saType: (*v.Sku).Name, + diskCount: -1, + } + + accounts[*v.Name] = sastate + } + + return accounts, nil +} + +func (c *BlobDiskController) createStorageAccount(storageAccountName string, storageAccountType storage.SkuName, location string, checkMaxAccounts bool) error { + bExist, _, _ := c.getStorageAccountState(storageAccountName) + if bExist { + newAccountState := &storageAccountState{ + diskCount: -1, + saType: storageAccountType, + name: storageAccountName, + } + + 
c.addAccountState(storageAccountName, newAccountState) + } + // Account Does not exist + if !bExist { + if len(c.accounts) == maxStorageAccounts && checkMaxAccounts { + return fmt.Errorf("azureDisk - can not create new storage account, current storage accounts count:%v Max is:%v", len(c.accounts), maxStorageAccounts) + } + + glog.V(2).Infof("azureDisk - Creating storage account %s type %s \n", storageAccountName, string(storageAccountType)) + + cp := storage.AccountCreateParameters{ + Sku: &storage.Sku{Name: storageAccountType}, + Tags: &map[string]*string{"created-by": to.StringPtr("azure-dd")}, + Location: &location} + cancel := make(chan struct{}) + + _, errChan := c.common.cloud.StorageAccountClient.Create(c.common.resourceGroup, storageAccountName, cp, cancel) + err := <-errChan + if err != nil { + return fmt.Errorf(fmt.Sprintf("Create Storage Account: %s, error: %s", storageAccountName, err)) + } + + newAccountState := &storageAccountState{ + diskCount: -1, + saType: storageAccountType, + name: storageAccountName, + } + + c.addAccountState(storageAccountName, newAccountState) + } + + if !bExist { + // SA Accounts takes time to be provisioned + // so if this account was just created allow it sometime + // before polling + glog.V(2).Infof("azureDisk - storage account %s was just created, allowing time before polling status") + time.Sleep(25 * time.Second) // as observed 25 is the average time for SA to be provisioned + } + + // finally, make sure that we default container is created + // before handing it back over + return c.ensureDefaultContainer(storageAccountName) +} + +// finds a new suitable storageAccount for this disk +func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuName) (string, error) { + maxDiskCount := maxDisksPerStorageAccounts + SAName := "" + totalDiskCounts := 0 + countAccounts := 0 // account of this type. 
+ for _, v := range c.accounts { + // filter out any stand-alone disks/accounts + if strings.Index(v.name, storageAccountNameMatch) != 0 { + continue + } + + // note: we compute avge stratified by type. + // this to enable user to grow per SA type to avoid low + //avg utilization on one account type skewing all data. + + if v.saType == storageAccountType { + // compute average + dCount, err := c.getDiskCount(v.name) + if err != nil { + return "", err + } + totalDiskCounts = totalDiskCounts + dCount + countAccounts = countAccounts + 1 + // empty account + if dCount == 0 { + glog.V(2).Infof("azureDisk - account %s identified for a new disk is because it has 0 allocated disks", v.name) + return v.name, nil // shortcircut, avg is good and no need to adjust + } + // if this account is less allocated + if dCount < maxDiskCount { + maxDiskCount = dCount + SAName = v.name + } + } + } + + // if we failed to find storageaccount + if SAName == "" { + glog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account") + SAName = getAccountNameForNum(c.getNextAccountNum()) + err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true) + if err != nil { + return "", err + } + return SAName, nil + } + + disksAfter := totalDiskCounts + 1 // with the new one! + + avgUtilization := float64(disksAfter) / float64(countAccounts*maxDisksPerStorageAccounts) + aboveAvg := (avgUtilization > storageAccountUtilizationBeforeGrowing) + + // avg are not create and we should craete more accounts if we can + if aboveAvg && countAccounts < maxStorageAccounts { + glog.V(2).Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). 
New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing) + SAName = getAccountNameForNum(c.getNextAccountNum()) + err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true) + if err != nil { + return "", err + } + return SAName, nil + } + + // avergates are not ok and we are at capacity(max storage accounts allowed) + if aboveAvg && countAccounts == maxStorageAccounts { + glog.Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts", + avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts) + } + + // we found a storage accounts && [ avg are ok || we reached max sa count ] + return SAName, nil +} +func (c *BlobDiskController) getNextAccountNum() int { + max := 0 + + for k := range c.accounts { + // filter out accounts that are for standalone + if strings.Index(k, storageAccountNameMatch) != 0 { + continue + } + num := getAccountNumFromName(k) + if num > max { + max = num + } + } + + return max + 1 +} + +func (c *BlobDiskController) deleteStorageAccount(storageAccountName string) error { + resp, err := c.common.cloud.StorageAccountClient.Delete(c.common.resourceGroup, storageAccountName) + if err != nil { + return fmt.Errorf("azureDisk - Delete of storage account '%s' failed with status %s...%v", storageAccountName, resp.Status, err) + } + + c.removeAccountState(storageAccountName) + + glog.Infof("azureDisk - Storage Account %s was deleted", storageAccountName) + return nil +} + +//Gets storage account exist, provisionStatus, Error if any +func (c *BlobDiskController) getStorageAccountState(storageAccountName string) (bool, storage.ProvisioningState, error) { + account, err := c.common.cloud.StorageAccountClient.GetProperties(c.common.resourceGroup, storageAccountName) + if err != nil { + return false, "", err + } + return true, 
account.AccountProperties.ProvisioningState, nil +} + +func (c *BlobDiskController) addAccountState(key string, state *storageAccountState) { + accountsLock.Lock() + defer accountsLock.Unlock() + + if _, ok := c.accounts[key]; !ok { + c.accounts[key] = state + } +} + +func (c *BlobDiskController) removeAccountState(key string) { + accountsLock.Lock() + defer accountsLock.Unlock() + delete(c.accounts, key) +} + +// pads account num with zeros as needed +func getAccountNameForNum(num int) string { + sNum := strconv.Itoa(num) + missingZeros := 3 - len(sNum) + strZero := "" + for missingZeros > 0 { + strZero = strZero + "0" + missingZeros = missingZeros - 1 + } + + sNum = strZero + sNum + return fmt.Sprintf(storageAccountNamePrefix, sNum) +} + +func getAccountNumFromName(accountName string) int { + nameLen := len(accountName) + num, _ := strconv.Atoi(accountName[nameLen-3:]) + + return num +} + +func createVHDHeader(size uint64) ([]byte, error) { + h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{}) + b := new(bytes.Buffer) + err := binary.Write(b, binary.BigEndian, h) + if err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func diskNameandSANameFromURI(diskURI string) (string, string, error) { + uri, err := url.Parse(diskURI) + if err != nil { + return "", "", err + } + + hostName := uri.Host + storageAccountName := strings.Split(hostName, ".")[0] + + segments := strings.Split(uri.Path, "/") + diskNameVhd := segments[len(segments)-1] + + return storageAccountName, diskNameVhd, nil +} diff --git a/pkg/cloudprovider/providers/azure/azure_controllerCommon.go b/pkg/cloudprovider/providers/azure/azure_controllerCommon.go new file mode 100644 index 00000000000..881a7dbb2c4 --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_controllerCommon.go @@ -0,0 +1,270 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "fmt" + "strings" + "time" + + "k8s.io/apimachinery/pkg/types" + kwait "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/kubernetes/pkg/cloudprovider" + + "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/golang/glog" +) + +const ( + defaultDataDiskCount int = 16 // which will allow you to work with most medium size VMs (if not found in map) + storageAccountNameTemplate = "pvc%s" + + // for limits check https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits#storage-limits + maxStorageAccounts = 100 // max # is 200 (250 with special request). 
this allows 100 for everything else including stand alone disks + maxDisksPerStorageAccounts = 60 + storageAccountUtilizationBeforeGrowing = 0.5 + storageAccountsCountInit = 2 // When the plug-in is init-ed, 2 storage accounts will be created to allow fast pvc create/attach/mount + + maxLUN = 64 // max number of LUNs per VM + errLeaseFailed = "AcquireDiskLeaseFailed" + errLeaseIDMissing = "LeaseIdMissing" + errContainerNotFound = "ContainerNotFound" +) + +var defaultBackOff = kwait.Backoff{ + Steps: 20, + Duration: 2 * time.Second, + Factor: 1.5, + Jitter: 0.0, +} + +type controllerCommon struct { + tenantID string + subscriptionID string + location string + storageEndpointSuffix string + resourceGroup string + clientID string + clientSecret string + managementEndpoint string + tokenEndPoint string + aadResourceEndPoint string + aadToken string + expiresOn time.Time + cloud *Cloud +} + +// AttachDisk attaches a vhd to vm +// the vhd must exist, can be identified by diskName, diskURI, and lun. 
+func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error { + vm, exists, err := c.cloud.getVirtualMachine(nodeName) + if err != nil { + return err + } else if !exists { + return cloudprovider.InstanceNotFound + } + disks := *vm.StorageProfile.DataDisks + if isManagedDisk { + disks = append(disks, + compute.DataDisk{ + Name: &diskName, + Lun: &lun, + Caching: cachingMode, + CreateOption: "attach", + ManagedDisk: &compute.ManagedDiskParameters{ + ID: &diskURI, + }, + }) + } else { + disks = append(disks, + compute.DataDisk{ + Name: &diskName, + Vhd: &compute.VirtualHardDisk{ + URI: &diskURI, + }, + Lun: &lun, + Caching: cachingMode, + CreateOption: "attach", + }) + } + + newVM := compute.VirtualMachine{ + Location: vm.Location, + VirtualMachineProperties: &compute.VirtualMachineProperties{ + StorageProfile: &compute.StorageProfile{ + DataDisks: &disks, + }, + }, + } + vmName := mapNodeNameToVMName(nodeName) + glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", c.resourceGroup, vmName) + c.cloud.operationPollRateLimiter.Accept() + respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil) + resp := <-respChan + err = <-errChan + if c.cloud.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { + glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", c.resourceGroup, vmName) + retryErr := c.cloud.CreateOrUpdateVMWithRetry(vmName, newVM) + if retryErr != nil { + err = retryErr + glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", c.resourceGroup, vmName) + } + } + if err != nil { + glog.Errorf("azureDisk - azure attach failed, err: %v", err) + detail := err.Error() + if strings.Contains(detail, errLeaseFailed) { + // if lease cannot be acquired, immediately detach the disk and return the original error + glog.Infof("azureDisk - failed to acquire disk lease, try detach") + 
c.cloud.DetachDiskByName(diskName, diskURI, nodeName) + } + } else { + glog.V(4).Infof("azureDisk - azure attach succeeded") + } + return err +} + +// DetachDiskByName detaches a vhd from host +// the vhd can be identified by diskName or diskURI +func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error { + vm, exists, err := c.cloud.getVirtualMachine(nodeName) + if err != nil || !exists { + // if host doesn't exist, no need to detach + glog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName) + return nil + } + + disks := *vm.StorageProfile.DataDisks + bFoundDisk := false + for i, disk := range disks { + if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) || + (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) || + (disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) { + // found the disk + glog.V(4).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI) + disks = append(disks[:i], disks[i+1:]...) 
+ bFoundDisk = true + break + } + } + + if !bFoundDisk { + return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI) + } + + newVM := compute.VirtualMachine{ + Location: vm.Location, + VirtualMachineProperties: &compute.VirtualMachineProperties{ + StorageProfile: &compute.StorageProfile{ + DataDisks: &disks, + }, + }, + } + vmName := mapNodeNameToVMName(nodeName) + glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", c.resourceGroup, vmName) + c.cloud.operationPollRateLimiter.Accept() + respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil) + resp := <-respChan + err = <-errChan + if c.cloud.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { + glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", c.resourceGroup, vmName) + retryErr := c.cloud.CreateOrUpdateVMWithRetry(vmName, newVM) + if retryErr != nil { + err = retryErr + glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", c.resourceGroup, vmName) + } + } + if err != nil { + glog.Errorf("azureDisk - azure disk detach failed, err: %v", err) + } else { + glog.V(4).Infof("azureDisk - azure disk detach succeeded") + } + return err +} + +// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI +func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) { + vm, exists, err := c.cloud.getVirtualMachine(nodeName) + if err != nil { + return -1, err + } else if !exists { + return -1, cloudprovider.InstanceNotFound + } + disks := *vm.StorageProfile.DataDisks + for _, disk := range disks { + if disk.Lun != nil && ((disk.Name != nil && diskName != "" && *disk.Name == diskName) || + (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) || + (disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI)) { + // found the disk + glog.V(4).Infof("azureDisk - find disk: lun 
%d name %q uri %q", *disk.Lun, diskName, diskURI) + return *disk.Lun, nil + } + } + return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName) +} + +// GetNextDiskLun searches all vhd attachment on the host and find unused lun +// return -1 if all luns are used +func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) { + vm, exists, err := c.cloud.getVirtualMachine(nodeName) + if err != nil { + return -1, err + } else if !exists { + return -1, cloudprovider.InstanceNotFound + } + used := make([]bool, maxLUN) + disks := *vm.StorageProfile.DataDisks + for _, disk := range disks { + if disk.Lun != nil { + used[*disk.Lun] = true + } + } + for k, v := range used { + if !v { + return int32(k), nil + } + } + return -1, fmt.Errorf("All Luns are used") +} + +// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName +func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) { + attached := make(map[string]bool) + for _, diskName := range diskNames { + attached[diskName] = false + } + vm, exists, err := c.cloud.getVirtualMachine(nodeName) + if !exists { + // if host doesn't exist, no need to detach + glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.", + nodeName, diskNames) + return attached, nil + } else if err != nil { + return attached, err + } + + disks := *vm.StorageProfile.DataDisks + for _, disk := range disks { + for _, diskName := range diskNames { + if disk.Name != nil && diskName != "" && *disk.Name == diskName { + attached[diskName] = true + } + } + } + + return attached, nil +} diff --git a/pkg/cloudprovider/providers/azure/azure_file.go b/pkg/cloudprovider/providers/azure/azure_file.go index ccdca622a46..48291128324 100644 --- a/pkg/cloudprovider/providers/azure/azure_file.go +++ b/pkg/cloudprovider/providers/azure/azure_file.go @@ -18,9 +18,13 @@ package azure import ( "fmt" 
- "strconv" azs "github.com/Azure/azure-sdk-for-go/storage" + "github.com/golang/glog" +) + +const ( + useHTTPS = true ) // create file share @@ -34,11 +38,15 @@ func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGB in // setting x-ms-share-quota can set quota on the new share, but in reality, setting quota in CreateShare // receives error "The metadata specified is invalid. It has characters that are not permitted." // As a result,breaking into two API calls: create share and set quota - if err = fileClient.CreateShare(name, nil); err != nil { + share := fileClient.GetShareReference(name) + if err = share.Create(nil); err != nil { return fmt.Errorf("failed to create file share, err: %v", err) } - if err = fileClient.SetShareProperties(name, azs.ShareHeaders{Quota: strconv.Itoa(sizeGB)}); err != nil { - az.deleteFileShare(accountName, accountKey, name) + share.Properties.Quota = sizeGB + if err = share.SetProperties(nil); err != nil { + if err := share.Delete(nil); err != nil { + glog.Errorf("Error deleting share: %v", err) + } return fmt.Errorf("failed to set quota on file share %s, err: %v", name, err) } return nil @@ -48,9 +56,10 @@ func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGB in func (az *Cloud) deleteFileShare(accountName, accountKey, name string) error { fileClient, err := az.getFileSvcClient(accountName, accountKey) if err == nil { - return fileClient.DeleteShare(name) + share := fileClient.GetShareReference(name) + return share.Delete(nil) } - return err + return err } func (az *Cloud) getFileSvcClient(accountName, accountKey string) (*azs.FileServiceClient, error) { diff --git a/pkg/cloudprovider/providers/azure/azure_instance_metadata.go b/pkg/cloudprovider/providers/azure/azure_instance_metadata.go new file mode 100644 index 00000000000..7a418c46453 --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_instance_metadata.go @@ -0,0 +1,103 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "encoding/json" + "io/ioutil" + "net/http" +) + +// This is just for tests injection +var metadataURL = "http://169.254.169.254/metadata" + +// SetMetadataURLForTesting is used to modify the URL used for +// accessing the metadata server. Should only be used for testing! +func SetMetadataURLForTesting(url string) { + metadataURL = url +} + +// NetworkMetadata contains metadata about an instance's network +type NetworkMetadata struct { + Interface []NetworkInterface `json:"interface"` +} + +// NetworkInterface represents an instances network interface. +type NetworkInterface struct { + IPV4 NetworkData `json:"ipv4"` + IPV6 NetworkData `json:"ipv6"` + MAC string `json:"macAddress"` +} + +// NetworkData contains IP information for a network. +type NetworkData struct { + IPAddress []IPAddress `json:"ipAddress"` + Subnet []Subnet `json:"subnet"` +} + +// IPAddress represents IP address information. +type IPAddress struct { + PrivateIP string `json:"privateIPAddress"` + PublicIP string `json:"publicIPAddress"` +} + +// Subnet represents subnet information. 
+type Subnet struct { + Address string `json:"address"` + Prefix string `json:"prefix"` +} + +// QueryMetadataJSON queries the metadata server and populates the passed in object +func QueryMetadataJSON(path string, obj interface{}) error { + data, err := queryMetadataBytes(path, "json") + if err != nil { + return err + } + return json.Unmarshal(data, obj) +} + +// QueryMetadataText queries the metadata server and returns the corresponding text +func QueryMetadataText(path string) (string, error) { + data, err := queryMetadataBytes(path, "text") + if err != nil { + return "", err + } + return string(data), err +} + +func queryMetadataBytes(path, format string) ([]byte, error) { + client := &http.Client{} + + req, err := http.NewRequest("GET", metadataURL+path, nil) + if err != nil { + return nil, err + } + req.Header.Add("Metadata", "True") + + q := req.URL.Query() + q.Add("format", format) + q.Add("api-version", "2017-04-02") + req.URL.RawQuery = q.Encode() + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ioutil.ReadAll(resp.Body) +} diff --git a/pkg/cloudprovider/providers/azure/azure_instances.go b/pkg/cloudprovider/providers/azure/azure_instances.go index e624a08d463..256428d8a39 100644 --- a/pkg/cloudprovider/providers/azure/azure_instances.go +++ b/pkg/cloudprovider/providers/azure/azure_instances.go @@ -29,6 +29,16 @@ import ( // NodeAddresses returns the addresses of the specified instance. 
func (az *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) { + if az.UseInstanceMetadata { + text, err := QueryMetadataText("instance/network/interface/0/ipv4/ipAddress/0/privateIpAddress") + if err != nil { + return nil, err + } + return []v1.NodeAddress{ + {Type: v1.NodeInternalIP, Address: text}, + {Type: v1.NodeHostName, Address: string(name)}, + }, nil + } ip, err := az.getIPForMachine(name) if err != nil { glog.Errorf("error: az.NodeAddresses, az.getIPForMachine(%s), err=%v", name, err) diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index 4cfede07729..9b959b4e4cb 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -143,8 +143,10 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nod sg.SecurityGroupPropertiesFormat.NetworkInterfaces = nil sg.SecurityGroupPropertiesFormat.Subnets = nil az.operationPollRateLimiter.Accept() - resp, err := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { + respChan, errChan := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil) + resp := <-respChan + err := <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { glog.V(2).Infof("ensure(%s) backing off: sg(%s) - updating", serviceName, *sg.Name) retryErr := az.CreateOrUpdateSGWithRetry(sg) if retryErr != nil { @@ -222,8 +224,10 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nod if !existsLb || lbNeedsUpdate { glog.V(3).Infof("ensure(%s): lb(%s) - updating", serviceName, lbName) az.operationPollRateLimiter.Accept() - resp, err := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { + respChan, errChan := 
az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil) + resp := <-respChan + err := <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { glog.V(2).Infof("ensure(%s) backing off: lb(%s) - updating", serviceName, lbName) retryErr := az.CreateOrUpdateLBWithRetry(lb) if retryErr != nil { @@ -315,8 +319,10 @@ func (az *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Servi sg.SecurityGroupPropertiesFormat.NetworkInterfaces = nil sg.SecurityGroupPropertiesFormat.Subnets = nil az.operationPollRateLimiter.Accept() - resp, err := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *reconciledSg.Name, reconciledSg, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { + respChan, errChan := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *reconciledSg.Name, reconciledSg, nil) + resp := <-respChan + err := <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { glog.V(2).Infof("delete(%s) backing off: sg(%s) - updating", serviceName, az.SecurityGroupName) retryErr := az.CreateOrUpdateSGWithRetry(reconciledSg) if retryErr != nil { @@ -369,8 +375,10 @@ func (az *Cloud) cleanupLoadBalancer(clusterName string, service *v1.Service, is if len(*lb.FrontendIPConfigurations) > 0 { glog.V(3).Infof("delete(%s): lb(%s) - updating", serviceName, lbName) az.operationPollRateLimiter.Accept() - resp, err := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { + respChan, errChan := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil) + resp := <-respChan + err := <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { glog.V(2).Infof("delete(%s) backing off: sg(%s) - updating", serviceName, az.SecurityGroupName) retryErr := az.CreateOrUpdateLBWithRetry(lb) if retryErr != nil { @@ -385,7 +393,9 @@ func (az *Cloud) 
cleanupLoadBalancer(clusterName string, service *v1.Service, is glog.V(3).Infof("delete(%s): lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName) az.operationPollRateLimiter.Accept() - resp, err := az.LoadBalancerClient.Delete(az.ResourceGroup, lbName, nil) + respChan, errChan := az.LoadBalancerClient.Delete(az.ResourceGroup, lbName, nil) + resp := <-respChan + err := <-errChan if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { glog.V(2).Infof("delete(%s) backing off: lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName) retryErr := az.DeleteLBWithRetry(lbName) @@ -440,8 +450,10 @@ func (az *Cloud) ensurePublicIPExists(serviceName, pipName string) (*network.Pub glog.V(3).Infof("ensure(%s): pip(%s) - creating", serviceName, *pip.Name) az.operationPollRateLimiter.Accept() - resp, err := az.PublicIPAddressesClient.CreateOrUpdate(az.ResourceGroup, *pip.Name, pip, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { + respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(az.ResourceGroup, *pip.Name, pip, nil) + resp := <-respChan + err = <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { glog.V(2).Infof("ensure(%s) backing off: pip(%s) - creating", serviceName, *pip.Name) retryErr := az.CreateOrUpdatePIPWithRetry(pip) if retryErr != nil { @@ -466,8 +478,9 @@ func (az *Cloud) ensurePublicIPExists(serviceName, pipName string) (*network.Pub func (az *Cloud) ensurePublicIPDeleted(serviceName, pipName string) error { glog.V(2).Infof("ensure(%s): pip(%s) - deleting", serviceName, pipName) az.operationPollRateLimiter.Accept() - resp, deleteErr := az.PublicIPAddressesClient.Delete(az.ResourceGroup, pipName, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, deleteErr) { + resp, deleteErrChan := az.PublicIPAddressesClient.Delete(az.ResourceGroup, pipName, nil) + deleteErr := <-deleteErrChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(<-resp, 
deleteErr) { glog.V(2).Infof("ensure(%s) backing off: pip(%s) - deleting", serviceName, pipName) retryErr := az.DeletePublicIPWithRetry(pipName) if retryErr != nil { @@ -772,8 +785,8 @@ func (az *Cloud) reconcileSecurityGroup(sg network.SecurityGroup, clusterName st DestinationPortRange: to.StringPtr(strconv.Itoa(int(port.Port))), SourceAddressPrefix: to.StringPtr(sourceAddressPrefixes[j]), DestinationAddressPrefix: to.StringPtr("*"), - Access: network.Allow, - Direction: network.Inbound, + Access: network.SecurityRuleAccessAllow, + Direction: network.SecurityRuleDirectionInbound, }, } } @@ -918,8 +931,10 @@ func (az *Cloud) ensureHostInPool(serviceName string, nodeName types.NodeName, b glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName) az.operationPollRateLimiter.Accept() - resp, err := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { + respChan, errChan := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil) + resp := <-respChan + err := <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { glog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err) retryErr := az.CreateOrUpdateInterfaceWithRetry(nic) if retryErr != nil { diff --git a/pkg/cloudprovider/providers/azure/azure_managedDiskController.go b/pkg/cloudprovider/providers/azure/azure_managedDiskController.go new file mode 100644 index 00000000000..5acdf583583 --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_managedDiskController.go @@ -0,0 +1,129 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "path" + "strings" + + "github.com/Azure/azure-sdk-for-go/arm/disk" + storage "github.com/Azure/azure-sdk-for-go/arm/storage" + "github.com/golang/glog" + kwait "k8s.io/apimachinery/pkg/util/wait" +) + +//ManagedDiskController : managed disk controller struct +type ManagedDiskController struct { + common *controllerCommon +} + +func newManagedDiskController(common *controllerCommon) (*ManagedDiskController, error) { + return &ManagedDiskController{common: common}, nil +} + +//CreateManagedDisk : create managed disk +func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error) { + glog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB) + + newTags := make(map[string]*string) + azureDDTag := "kubernetes-azure-dd" + newTags["created-by"] = &azureDDTag + + // insert original tags to newTags + if tags != nil { + for k, v := range tags { + // Azure won't allow / (forward slash) in tags + newKey := strings.Replace(k, "/", "-", -1) + newValue := strings.Replace(v, "/", "-", -1) + newTags[newKey] = &newValue + } + } + + diskSizeGB := int32(sizeGB) + model := disk.Model{ + Location: &c.common.location, + Tags: &newTags, + Properties: &disk.Properties{ + AccountType: disk.StorageAccountTypes(storageAccountType), + DiskSizeGB: &diskSizeGB, + CreationData: &disk.CreationData{CreateOption: disk.Empty}, + }} + cancel := make(chan struct{}) + respChan, errChan := 
c.common.cloud.DisksClient.CreateOrUpdate(c.common.resourceGroup, diskName, model, cancel) + <-respChan + err := <-errChan + if err != nil { + return "", err + } + + diskID := "" + + err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) { + provisionState, id, err := c.getDisk(diskName) + diskID = id + // We are waiting for provisioningState==Succeeded + // We don't want to hand-off managed disks to k8s while they are + // still being provisioned, this is to avoid some race conditions + if err != nil { + return false, err + } + if strings.ToLower(provisionState) == "succeeded" { + return true, nil + } + return false, nil + }) + + if err != nil { + glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", diskName, storageAccountType, sizeGB) + } else { + glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB) + } + + return diskID, nil +} + +//DeleteManagedDisk : delete managed disk +func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error { + diskName := path.Base(diskURI) + cancel := make(chan struct{}) + respChan, errChan := c.common.cloud.DisksClient.Delete(c.common.resourceGroup, diskName, cancel) + <-respChan + err := <-errChan + if err != nil { + return err + } + // We don't need poll here, k8s will immediately stop referencing the disk + // the disk will be eventually deleted - cleanly - by ARM + + glog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI) + + return nil +} + +// return: disk provisionState, diskID, error +func (c *ManagedDiskController) getDisk(diskName string) (string, string, error) { + result, err := c.common.cloud.DisksClient.Get(c.common.resourceGroup, diskName) + if err != nil { + return "", "", err + } + + if result.Properties != nil && (*result.Properties).ProvisioningState != nil { + return *(*result.Properties).ProvisioningState, *result.ID, nil + 
} + + return "", "", err +} diff --git a/pkg/cloudprovider/providers/azure/azure_routes.go b/pkg/cloudprovider/providers/azure/azure_routes.go index 0d7a23ebfd8..b7cb4ae810f 100644 --- a/pkg/cloudprovider/providers/azure/azure_routes.go +++ b/pkg/cloudprovider/providers/azure/azure_routes.go @@ -78,8 +78,10 @@ func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *clo glog.V(3).Infof("create: creating routetable. routeTableName=%q", az.RouteTableName) az.operationPollRateLimiter.Accept() - resp, err := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { + respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil) + resp := <-respChan + err := <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { glog.V(2).Infof("create backing off: creating routetable. routeTableName=%q", az.RouteTableName) retryErr := az.CreateOrUpdateRouteTableWithRetry(routeTable) if retryErr != nil { @@ -114,8 +116,10 @@ func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *clo glog.V(3).Infof("create: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) az.operationPollRateLimiter.Accept() - resp, err := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { + respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil) + resp := <-respChan + err = <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { glog.V(2).Infof("create backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) retryErr := az.CreateOrUpdateRouteWithRetry(route) if retryErr != nil { @@ -138,7 +142,10 @@ func (az *Cloud) 
DeleteRoute(clusterName string, kubeRoute *cloudprovider.Route) routeName := mapNodeNameToRouteName(kubeRoute.TargetNode) az.operationPollRateLimiter.Accept() - resp, err := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil) + respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil) + resp := <-respChan + err := <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { glog.V(2).Infof("delete backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) retryErr := az.DeleteRouteWithRetry(routeName) diff --git a/pkg/cloudprovider/providers/azure/azure_storage.go b/pkg/cloudprovider/providers/azure/azure_storage.go index b810480ab47..8572b9c779d 100644 --- a/pkg/cloudprovider/providers/azure/azure_storage.go +++ b/pkg/cloudprovider/providers/azure/azure_storage.go @@ -18,260 +18,10 @@ package azure import ( "fmt" - "strings" - "github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/golang/glog" - "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/volume" ) -const ( - maxLUN = 64 // max number of LUNs per VM - errLeaseFailed = "AcquireDiskLeaseFailed" - errLeaseIDMissing = "LeaseIdMissing" - errContainerNotFound = "ContainerNotFound" -) - -// AttachDisk attaches a vhd to vm -// the vhd must exist, can be identified by diskName, diskURI, and lun. 
-func (az *Cloud) AttachDisk(diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error { - vm, exists, err := az.getVirtualMachine(nodeName) - if err != nil { - return err - } else if !exists { - return cloudprovider.InstanceNotFound - } - disks := *vm.StorageProfile.DataDisks - disks = append(disks, - compute.DataDisk{ - Name: &diskName, - Vhd: &compute.VirtualHardDisk{ - URI: &diskURI, - }, - Lun: &lun, - Caching: cachingMode, - CreateOption: "attach", - }) - - newVM := compute.VirtualMachine{ - Location: vm.Location, - VirtualMachineProperties: &compute.VirtualMachineProperties{ - StorageProfile: &compute.StorageProfile{ - DataDisks: &disks, - }, - }, - } - vmName := mapNodeNameToVMName(nodeName) - glog.V(2).Infof("create(%s): vm(%s)", az.ResourceGroup, vmName) - az.operationPollRateLimiter.Accept() - resp, err := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { - glog.V(2).Infof("create(%s) backing off: vm(%s)", az.ResourceGroup, vmName) - retryErr := az.CreateOrUpdateVMWithRetry(vmName, newVM) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("create(%s) abort backoff: vm(%s)", az.ResourceGroup, vmName) - } - } - if err != nil { - glog.Errorf("azure attach failed, err: %v", err) - detail := err.Error() - if strings.Contains(detail, errLeaseFailed) { - // if lease cannot be acquired, immediately detach the disk and return the original error - glog.Infof("failed to acquire disk lease, try detach") - az.DetachDiskByName(diskName, diskURI, nodeName) - } - } else { - glog.V(4).Infof("azure attach succeeded") - } - return err -} - -// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName -func (az *Cloud) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) { - attached := make(map[string]bool) - for _, diskName := range diskNames { - 
attached[diskName] = false - } - vm, exists, err := az.getVirtualMachine(nodeName) - if !exists { - // if host doesn't exist, no need to detach - glog.Warningf("Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.", - nodeName, diskNames) - return attached, nil - } else if err != nil { - return attached, err - } - - disks := *vm.StorageProfile.DataDisks - for _, disk := range disks { - for _, diskName := range diskNames { - if disk.Name != nil && diskName != "" && *disk.Name == diskName { - attached[diskName] = true - } - } - } - - return attached, nil -} - -// DetachDiskByName detaches a vhd from host -// the vhd can be identified by diskName or diskURI -func (az *Cloud) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error { - vm, exists, err := az.getVirtualMachine(nodeName) - if err != nil || !exists { - // if host doesn't exist, no need to detach - glog.Warningf("cannot find node %s, skip detaching disk %s", nodeName, diskName) - return nil - } - - disks := *vm.StorageProfile.DataDisks - for i, disk := range disks { - if (disk.Name != nil && diskName != "" && *disk.Name == diskName) || (disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) { - // found the disk - glog.V(4).Infof("detach disk: name %q uri %q", diskName, diskURI) - disks = append(disks[:i], disks[i+1:]...) 
- break - } - } - newVM := compute.VirtualMachine{ - Location: vm.Location, - VirtualMachineProperties: &compute.VirtualMachineProperties{ - StorageProfile: &compute.StorageProfile{ - DataDisks: &disks, - }, - }, - } - vmName := mapNodeNameToVMName(nodeName) - glog.V(2).Infof("create(%s): vm(%s)", az.ResourceGroup, vmName) - az.operationPollRateLimiter.Accept() - resp, err := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { - glog.V(2).Infof("create(%s) backing off: vm(%s)", az.ResourceGroup, vmName) - retryErr := az.CreateOrUpdateVMWithRetry(vmName, newVM) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("create(%s) abort backoff: vm(%s)", az.ResourceGroup, vmName) - } - } - if err != nil { - glog.Errorf("azure disk detach failed, err: %v", err) - } else { - glog.V(4).Infof("azure disk detach succeeded") - } - return err -} - -// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI -func (az *Cloud) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) { - vm, exists, err := az.getVirtualMachine(nodeName) - if err != nil { - return -1, err - } else if !exists { - return -1, cloudprovider.InstanceNotFound - } - disks := *vm.StorageProfile.DataDisks - for _, disk := range disks { - if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) || (disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) { - // found the disk - glog.V(4).Infof("find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI) - return *disk.Lun, nil - } - } - return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName) -} - -// GetNextDiskLun searches all vhd attachment on the host and find unused lun -// return -1 if all luns are used -func (az *Cloud) GetNextDiskLun(nodeName types.NodeName) (int32, error) { - vm, exists, err := az.getVirtualMachine(nodeName) - if err != nil { - 
return -1, err - } else if !exists { - return -1, cloudprovider.InstanceNotFound - } - used := make([]bool, maxLUN) - disks := *vm.StorageProfile.DataDisks - for _, disk := range disks { - if disk.Lun != nil { - used[*disk.Lun] = true - } - } - for k, v := range used { - if !v { - return int32(k), nil - } - } - return -1, fmt.Errorf("All Luns are used") -} - -// CreateVolume creates a VHD blob in a storage account that has storageType and location using the given storage account. -// If no storage account is given, search all the storage accounts associated with the resource group and pick one that -// fits storage type and location. -func (az *Cloud) CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error) { - var err error - accounts := []accountWithLocation{} - if len(storageAccount) > 0 { - accounts = append(accounts, accountWithLocation{Name: storageAccount}) - } else { - // find a storage account - accounts, err = az.getStorageAccounts() - if err != nil { - // TODO: create a storage account and container - return "", "", 0, err - } - } - for _, account := range accounts { - glog.V(4).Infof("account %s type %s location %s", account.Name, account.StorageType, account.Location) - if ((storageType == "" || account.StorageType == storageType) && (location == "" || account.Location == location)) || len(storageAccount) > 0 { - // find the access key with this account - key, err := az.getStorageAccesskey(account.Name) - if err != nil { - glog.V(2).Infof("no key found for storage account %s", account.Name) - continue - } - - // create a page blob in this account's vhd container - name, uri, err := az.createVhdBlob(account.Name, key, name, int64(requestGB), nil) - if err != nil { - glog.V(2).Infof("failed to create vhd in account %s: %v", account.Name, err) - continue - } - glog.V(4).Infof("created vhd blob uri: %s", uri) - return name, uri, requestGB, err - } - } - return "", "", 0, fmt.Errorf("failed to find a 
matching storage account") -} - -// DeleteVolume deletes a VHD blob -func (az *Cloud) DeleteVolume(name, uri string) error { - accountName, blob, err := az.getBlobNameAndAccountFromURI(uri) - if err != nil { - return fmt.Errorf("failed to parse vhd URI %v", err) - } - key, err := az.getStorageAccesskey(accountName) - if err != nil { - return fmt.Errorf("no key for storage account %s, err %v", accountName, err) - } - err = az.deleteVhdBlob(accountName, key, blob) - if err != nil { - glog.Warningf("failed to delete blob %s err: %v", uri, err) - detail := err.Error() - if strings.Contains(detail, errLeaseIDMissing) { - // disk is still being used - // see https://msdn.microsoft.com/en-us/library/microsoft.windowsazure.storage.blob.protocol.bloberrorcodestrings.leaseidmissing.aspx - return volume.NewDeletedVolumeInUseError(fmt.Sprintf("disk %q is still in use while being deleted", name)) - } - return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", uri, accountName, blob, err) - } - glog.V(4).Infof("blob %s deleted", uri) - return nil - -} - // CreateFileShare creates a file share, using a matching storage account func (az *Cloud) CreateFileShare(name, storageAccount, storageType, location string, requestGB int) (string, string, error) { var err error diff --git a/pkg/cloudprovider/providers/azure/azure_test.go b/pkg/cloudprovider/providers/azure/azure_test.go index 788be6451c8..9b9050579eb 100644 --- a/pkg/cloudprovider/providers/azure/azure_test.go +++ b/pkg/cloudprovider/providers/azure/azure_test.go @@ -17,7 +17,11 @@ limitations under the License. package azure import ( + "encoding/json" "fmt" + "net/http" + "net/http/httptest" + "reflect" "strings" "testing" @@ -556,7 +560,7 @@ func TestProtocolTranslationTCP(t *testing.T) { if *transportProto != network.TransportProtocolTCP { t.Errorf("Expected TCP LoadBalancer Rule Protocol. 
Got %v", transportProto) } - if *securityGroupProto != network.TCP { + if *securityGroupProto != network.SecurityRuleProtocolTCP { t.Errorf("Expected TCP SecurityGroup Protocol. Got %v", transportProto) } if *probeProto != network.ProbeProtocolTCP { @@ -570,7 +574,7 @@ func TestProtocolTranslationUDP(t *testing.T) { if *transportProto != network.TransportProtocolUDP { t.Errorf("Expected UDP LoadBalancer Rule Protocol. Got %v", transportProto) } - if *securityGroupProto != network.UDP { + if *securityGroupProto != network.SecurityRuleProtocolUDP { t.Errorf("Expected UDP SecurityGroup Protocol. Got %v", transportProto) } if probeProto != nil { @@ -585,6 +589,8 @@ func TestNewCloudFromJSON(t *testing.T) { "subscriptionId": "--subscription-id--", "aadClientId": "--aad-client-id--", "aadClientSecret": "--aad-client-secret--", + "aadClientCertPath": "--aad-client-cert-path--", + "aadClientCertPassword": "--aad-client-cert-password--", "resourceGroup": "--resource-group--", "location": "--location--", "subnetName": "--subnet-name--", @@ -606,15 +612,20 @@ func TestNewCloudFromJSON(t *testing.T) { // Test Backoff and Rate Limit defaults (json) func TestCloudDefaultConfigFromJSON(t *testing.T) { - config := `{}` + config := `{ + "aadClientId": "--aad-client-id--", + "aadClientSecret": "--aad-client-secret--" + }` validateEmptyConfig(t, config) } // Test Backoff and Rate Limit defaults (yaml) func TestCloudDefaultConfigFromYAML(t *testing.T) { - config := `` - + config := ` +aadClientId: --aad-client-id-- +aadClientSecret: --aad-client-secret-- +` validateEmptyConfig(t, config) } @@ -625,6 +636,8 @@ tenantId: --tenant-id-- subscriptionId: --subscription-id-- aadClientId: --aad-client-id-- aadClientSecret: --aad-client-secret-- +aadClientCertPath: --aad-client-cert-path-- +aadClientCertPassword: --aad-client-cert-password-- resourceGroup: --resource-group-- location: --location-- subnetName: --subnet-name-- @@ -659,6 +672,12 @@ func validateConfig(t *testing.T, config string) 
{ if azureCloud.AADClientSecret != "--aad-client-secret--" { t.Errorf("got incorrect value for AADClientSecret") } + if azureCloud.AADClientCertPath != "--aad-client-cert-path--" { + t.Errorf("got incorrect value for AADClientCertPath") + } + if azureCloud.AADClientCertPassword != "--aad-client-cert-password--" { + t.Errorf("got incorrect value for AADClientCertPassword") + } if azureCloud.ResourceGroup != "--resource-group--" { t.Errorf("got incorrect value for ResourceGroup") } @@ -801,3 +820,60 @@ func TestSplitProviderID(t *testing.T) { } } + +func TestMetadataParsing(t *testing.T) { + data := ` +{ + "interface": [ + { + "ipv4": { + "ipAddress": [ + { + "privateIpAddress": "10.0.1.4", + "publicIpAddress": "X.X.X.X" + } + ], + "subnet": [ + { + "address": "10.0.1.0", + "prefix": "24" + } + ] + }, + "ipv6": { + "ipAddress": [ + + ] + }, + "macAddress": "002248020E1E" + } + ] +} +` + + network := NetworkMetadata{} + if err := json.Unmarshal([]byte(data), &network); err != nil { + t.Errorf("Unexpected error: %v", err) + } + + ip := network.Interface[0].IPV4.IPAddress[0].PrivateIP + if ip != "10.0.1.4" { + t.Errorf("Unexpected value: %s, expected 10.0.1.4", ip) + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, data) + })) + defer server.Close() + + SetMetadataURLForTesting(server.URL) + + networkJSON := NetworkMetadata{} + if err := QueryMetadataJSON("/some/path", &networkJSON); err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if !reflect.DeepEqual(network, networkJSON) { + t.Errorf("Unexpected inequality:\n%#v\nvs\n%#v", network, networkJSON) + } +} diff --git a/pkg/cloudprovider/providers/azure/azure_util.go b/pkg/cloudprovider/providers/azure/azure_util.go index d8d5b113def..5f59da85918 100644 --- a/pkg/cloudprovider/providers/azure/azure_util.go +++ b/pkg/cloudprovider/providers/azure/azure_util.go @@ -19,7 +19,9 @@ package azure import ( "errors" "fmt" + "hash/crc32" "regexp" + 
"strconv" "strings" "k8s.io/api/core/v1" @@ -135,12 +137,12 @@ func getProtocolsFromKubernetesProtocol(protocol v1.Protocol) (*network.Transpor switch protocol { case v1.ProtocolTCP: transportProto = network.TransportProtocolTCP - securityProto = network.TCP + securityProto = network.SecurityRuleProtocolTCP probeProto = network.ProbeProtocolTCP return &transportProto, &securityProto, &probeProto, nil case v1.ProtocolUDP: transportProto = network.TransportProtocolUDP - securityProto = network.UDP + securityProto = network.SecurityRuleProtocolUDP return &transportProto, &securityProto, nil, nil default: return &transportProto, &securityProto, &probeProto, fmt.Errorf("Only TCP and UDP are supported for Azure LoadBalancers") @@ -293,3 +295,58 @@ func splitProviderID(providerID string) (types.NodeName, error) { } return types.NodeName(matches[1]), nil } + +var polyTable = crc32.MakeTable(crc32.Koopman) + +//MakeCRC32 : convert string to CRC32 format +func MakeCRC32(str string) string { + crc := crc32.New(polyTable) + crc.Write([]byte(str)) + hash := crc.Sum32() + return strconv.FormatUint(uint64(hash), 10) +} + +//ExtractVMData : extract dataDisks, storageProfile from a map struct +func ExtractVMData(vmData map[string]interface{}) (dataDisks []interface{}, + storageProfile map[string]interface{}, + hardwareProfile map[string]interface{}, err error) { + props, ok := vmData["properties"].(map[string]interface{}) + if !ok { + return nil, nil, nil, fmt.Errorf("convert vmData(properties) to map error") + } + + storageProfile, ok = props["storageProfile"].(map[string]interface{}) + if !ok { + return nil, nil, nil, fmt.Errorf("convert vmData(storageProfile) to map error") + } + + hardwareProfile, ok = props["hardwareProfile"].(map[string]interface{}) + if !ok { + return nil, nil, nil, fmt.Errorf("convert vmData(hardwareProfile) to map error") + } + + dataDisks, ok = storageProfile["dataDisks"].([]interface{}) + if !ok { + return nil, nil, nil, fmt.Errorf("convert 
vmData(dataDisks) to map error") + } + return dataDisks, storageProfile, hardwareProfile, nil +} + +//ExtractDiskData : extract provisioningState, diskState from a map struct +func ExtractDiskData(diskData interface{}) (provisioningState string, diskState string, err error) { + fragment, ok := diskData.(map[string]interface{}) + if !ok { + return "", "", fmt.Errorf("convert diskData to map error") + } + + properties, ok := fragment["properties"].(map[string]interface{}) + if !ok { + return "", "", fmt.Errorf("convert diskData(properties) to map error") + } + + provisioningState, ok = properties["provisioningState"].(string) // if there is a disk, provisioningState property will be there + if ref, ok := properties["diskState"]; ok { + diskState = ref.(string) + } + return provisioningState, diskState, nil +} diff --git a/pkg/cloudprovider/providers/gce/BUILD b/pkg/cloudprovider/providers/gce/BUILD index 3b97c50e768..30d47605797 100644 --- a/pkg/cloudprovider/providers/gce/BUILD +++ b/pkg/cloudprovider/providers/gce/BUILD @@ -55,6 +55,8 @@ go_library( "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", "//vendor/golang.org/x/oauth2/google:go_default_library", + "//vendor/google.golang.org/api/cloudkms/v1:go_default_library", + "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library", "//vendor/google.golang.org/api/compute/v0.beta:go_default_library", "//vendor/google.golang.org/api/compute/v1:go_default_library", "//vendor/google.golang.org/api/container/v1:go_default_library", diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index 90e2570a429..a43740151c8 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -25,9 +25,9 @@ import ( "sync" "time" - "cloud.google.com/go/compute/metadata" + gcfg "gopkg.in/gcfg.v1" - "gopkg.in/gcfg.v1" + "cloud.google.com/go/compute/metadata" 
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" @@ -38,6 +38,8 @@ import ( "github.com/golang/glog" "golang.org/x/oauth2" "golang.org/x/oauth2/google" + cloudkms "google.golang.org/api/cloudkms/v1" + computealpha "google.golang.org/api/compute/v0.alpha" computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" container "google.golang.org/api/container/v1" @@ -74,6 +76,8 @@ const ( gceHcHealthyThreshold = int64(1) // Defaults to 5 * 2 = 10 seconds before the LB will steer traffic away gceHcUnhealthyThreshold = int64(5) + + gceComputeAPIEndpoint = "https://www.googleapis.com/compute/v1/" ) // GCECloud is an implementation of Interface, LoadBalancer and Instances for Google Compute Engine. @@ -84,7 +88,9 @@ type GCECloud struct { service *compute.Service serviceBeta *computebeta.Service + serviceAlpha *computealpha.Service containerService *container.Service + cloudkmsService *cloudkms.Service clientBuilder controller.ControllerClientBuilder projectID string region string @@ -137,6 +143,8 @@ type Config struct { NodeTags []string `gcfg:"node-tags"` NodeInstancePrefix string `gcfg:"node-instance-prefix"` Multizone bool `gcfg:"multizone"` + // Specifying ApiEndpoint will override the default GCE compute API endpoint. + ApiEndpoint string `gcfg:"api-endpoint"` } } @@ -153,8 +161,19 @@ func (g *GCECloud) GetComputeService() *compute.Service { return g.service } +// Raw access to the cloudkmsService of GCE cloud. Required for encryption of etcd using Google KMS. +func (g *GCECloud) GetKMSService() *cloudkms.Service { + return g.cloudkmsService +} + +// Returns the ProjectID corresponding to the project this cloud is in. +func (g *GCECloud) GetProjectID() string { + return g.projectID +} + // newGCECloud creates a new instance of GCECloud. 
func newGCECloud(config io.Reader) (*GCECloud, error) { + apiEndpoint := "" projectID, zone, err := getProjectAndZone() if err != nil { return nil, err @@ -169,7 +188,7 @@ func newGCECloud(config io.Reader) (*GCECloud, error) { if err != nil { return nil, err } - networkURL := gceNetworkURL(projectID, networkName) + networkURL := gceNetworkURL("", projectID, networkName) subnetworkURL := "" // By default, Kubernetes clusters only run against one zone @@ -185,22 +204,23 @@ func newGCECloud(config io.Reader) (*GCECloud, error) { return nil, err } glog.Infof("Using GCE provider config %+v", cfg) + if cfg.Global.ApiEndpoint != "" { + apiEndpoint = cfg.Global.ApiEndpoint + } if cfg.Global.ProjectID != "" { projectID = cfg.Global.ProjectID } - if cfg.Global.NetworkName != "" { - if strings.Contains(cfg.Global.NetworkName, "/") { - networkURL = cfg.Global.NetworkName - } else { - networkURL = gceNetworkURL(cfg.Global.ProjectID, cfg.Global.NetworkName) - } + + if cfg.Global.NetworkName != "" && strings.Contains(cfg.Global.NetworkName, "/") { + networkURL = cfg.Global.NetworkName + } else { + networkURL = gceNetworkURL(apiEndpoint, projectID, networkName) } - if cfg.Global.SubnetworkName != "" { - if strings.Contains(cfg.Global.SubnetworkName, "/") { - subnetworkURL = cfg.Global.SubnetworkName - } else { - subnetworkURL = gceSubnetworkURL(cfg.Global.ProjectID, region, cfg.Global.SubnetworkName) - } + + if cfg.Global.SubnetworkName != "" && strings.Contains(cfg.Global.SubnetworkName, "/") { + subnetworkURL = cfg.Global.SubnetworkName + } else { + subnetworkURL = gceSubnetworkURL(apiEndpoint, cfg.Global.ProjectID, region, cfg.Global.SubnetworkName) } if cfg.Global.TokenURL != "" { tokenSource = NewAltTokenSource(cfg.Global.TokenURL, cfg.Global.TokenBody) @@ -212,7 +232,7 @@ func newGCECloud(config io.Reader) (*GCECloud, error) { } } - return CreateGCECloud(projectID, region, zone, managedZones, networkURL, subnetworkURL, + return CreateGCECloud(apiEndpoint, projectID, region, 
zone, managedZones, networkURL, subnetworkURL, nodeTags, nodeInstancePrefix, tokenSource, true /* useMetadataServer */) } @@ -220,36 +240,62 @@ func newGCECloud(config io.Reader) (*GCECloud, error) { // If no networkUrl is specified, loads networkName via rest call. // If no tokenSource is specified, uses oauth2.DefaultTokenSource. // If managedZones is nil / empty all zones in the region will be managed. -func CreateGCECloud(projectID, region, zone string, managedZones []string, networkURL, subnetworkURL string, nodeTags []string, +func CreateGCECloud(apiEndpoint, projectID, region, zone string, managedZones []string, networkURL, subnetworkURL string, nodeTags []string, nodeInstancePrefix string, tokenSource oauth2.TokenSource, useMetadataServer bool) (*GCECloud, error) { client, err := newOauthClient(tokenSource) if err != nil { return nil, err } - service, err := compute.New(client) if err != nil { return nil, err } client, err = newOauthClient(tokenSource) + if err != nil { + return nil, err + } serviceBeta, err := computebeta.New(client) if err != nil { return nil, err } + client, err = newOauthClient(tokenSource) + if err != nil { + return nil, err + } + serviceAlpha, err := computealpha.New(client) + if err != nil { + return nil, err + } + + // Expect override api endpoint to always be v1 api and follows the same pattern as prod. + // Generate alpha and beta api endpoints based on override v1 api endpoint. 
+ // For example, + // staging API endpoint: https://www.googleapis.com/compute/staging_v1/ + if apiEndpoint != "" { + service.BasePath = fmt.Sprintf("%sprojects/", apiEndpoint) + serviceBeta.BasePath = fmt.Sprintf("%sprojects/", strings.Replace(apiEndpoint, "v1", "beta", -1)) + serviceAlpha.BasePath = fmt.Sprintf("%sprojects/", strings.Replace(apiEndpoint, "v1", "alpha", -1)) + } + containerService, err := container.New(client) if err != nil { return nil, err } + cloudkmsService, err := cloudkms.New(client) + if err != nil { + return nil, err + } + if networkURL == "" { networkName, err := getNetworkNameViaAPICall(service, projectID) if err != nil { return nil, err } - networkURL = gceNetworkURL(projectID, networkName) + networkURL = gceNetworkURL(apiEndpoint, projectID, networkName) } networkProjectID, err := getProjectIDInURL(networkURL) @@ -274,6 +320,7 @@ func CreateGCECloud(projectID, region, zone string, managedZones []string, netwo service: service, serviceBeta: serviceBeta, containerService: containerService, + cloudkmsService: cloudkmsService, projectID: projectID, networkProjectID: networkProjectID, onXPN: onXPN, @@ -365,12 +412,18 @@ func (gce *GCECloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut [] // GCECloud implements cloudprovider.Interface. 
var _ cloudprovider.Interface = (*GCECloud)(nil) -func gceNetworkURL(project, network string) string { - return fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/networks/%s", project, network) +func gceNetworkURL(apiEndpoint, project, network string) string { + if apiEndpoint == "" { + apiEndpoint = gceComputeAPIEndpoint + } + return apiEndpoint + strings.Join([]string{"projects", project, "global", "networks", network}, "/") } -func gceSubnetworkURL(project, region, subnetwork string) string { - return fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/regions/%s/subnetworks/%s", project, region, subnetwork) +func gceSubnetworkURL(apiEndpoint, project, region, subnetwork string) string { + if apiEndpoint == "" { + apiEndpoint = gceComputeAPIEndpoint + } + return apiEndpoint + strings.Join([]string{"projects", project, "regions", region, "subnetworks", subnetwork}, "/") } // getProjectIDInURL parses typical full resource URLS and shorter URLS diff --git a/pkg/cloudprovider/providers/gce/gce_addresses.go b/pkg/cloudprovider/providers/gce/gce_addresses.go index 877d744ddfa..3592fcf3b6c 100644 --- a/pkg/cloudprovider/providers/gce/gce_addresses.go +++ b/pkg/cloudprovider/providers/gce/gce_addresses.go @@ -33,18 +33,13 @@ func newAddressMetricContext(request, region string) *metricContext { // Caller is allocated a random IP if they do not specify an ipAddress. If an // ipAddress is specified, it must belong to the current project, eg: an // ephemeral IP associated with a global forwarding rule. 
-func (gce *GCECloud) ReserveGlobalAddress(addr *compute.Address) (*compute.Address, error) { +func (gce *GCECloud) ReserveGlobalAddress(addr *compute.Address) error { mc := newAddressMetricContext("reserve", "") op, err := gce.service.GlobalAddresses.Insert(gce.projectID, addr).Do() if err != nil { - return nil, mc.Observe(err) + return mc.Observe(err) } - - if err := gce.waitForGlobalOp(op, mc); err != nil { - return nil, err - } - - return gce.GetGlobalAddress(addr.Name) + return gce.waitForGlobalOp(op, mc) } // DeleteGlobalAddress deletes a global address by name. @@ -65,17 +60,13 @@ func (gce *GCECloud) GetGlobalAddress(name string) (*compute.Address, error) { } // ReserveRegionAddress creates a region address -func (gce *GCECloud) ReserveRegionAddress(addr *compute.Address, region string) (*compute.Address, error) { +func (gce *GCECloud) ReserveRegionAddress(addr *compute.Address, region string) error { mc := newAddressMetricContext("reserve", region) op, err := gce.service.Addresses.Insert(gce.projectID, region, addr).Do() if err != nil { - return nil, mc.Observe(err) + return mc.Observe(err) } - if err := gce.waitForRegionOp(op, region, mc); err != nil { - return nil, err - } - - return gce.GetRegionAddress(addr.Name, region) + return gce.waitForRegionOp(op, region, mc) } // DeleteRegionAddress deletes a region address by name. diff --git a/pkg/cloudprovider/providers/gce/gce_disks.go b/pkg/cloudprovider/providers/gce/gce_disks.go index d91bdec664a..c916989b5ee 100644 --- a/pkg/cloudprovider/providers/gce/gce_disks.go +++ b/pkg/cloudprovider/providers/gce/gce_disks.go @@ -20,7 +20,6 @@ import ( "encoding/json" "fmt" "net/http" - "path" "strings" "time" @@ -41,7 +40,7 @@ const ( DiskTypeStandard = "pd-standard" diskTypeDefault = DiskTypeStandard - diskTypeUriTemplate = "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/diskTypes/%s" + diskTypeUriTemplate = "%s/zones/%s/diskTypes/%s" ) // Disks is interface for manipulation with GCE PDs. 
@@ -234,7 +233,12 @@ func (gce *GCECloud) CreateDisk( default: return fmt.Errorf("invalid GCE disk type %q", diskType) } - diskTypeUri := fmt.Sprintf(diskTypeUriTemplate, gce.projectID, zone, diskType) + + projectsApiEndpoint := gceComputeAPIEndpoint + "projects/" + if gce.service != nil { + projectsApiEndpoint = gce.service.BasePath + } + diskTypeUri := projectsApiEndpoint + fmt.Sprintf(diskTypeUriTemplate, gce.projectID, zone, diskType) diskToCreate := &compute.Disk{ Name: name, @@ -424,9 +428,8 @@ func (gce *GCECloud) convertDiskToAttachedDisk(disk *GCEDisk, readWrite string) DeviceName: disk.Name, Kind: disk.Kind, Mode: readWrite, - Source: "https://" + path.Join( - "www.googleapis.com/compute/v1/projects/", - gce.projectID, "zones", disk.Zone, "disks", disk.Name), + Source: gce.service.BasePath + strings.Join([]string{ + gce.projectID, "zones", disk.Zone, "disks", disk.Name}, "/"), Type: "PERSISTENT", } } diff --git a/pkg/cloudprovider/providers/gce/gce_disks_test.go b/pkg/cloudprovider/providers/gce/gce_disks_test.go index 7333776ef7a..5ff88d65c0b 100644 --- a/pkg/cloudprovider/providers/gce/gce_disks_test.go +++ b/pkg/cloudprovider/providers/gce/gce_disks_test.go @@ -43,7 +43,7 @@ func TestCreateDisk_Basic(t *testing.T) { tags := make(map[string]string) tags["test-tag"] = "test-value" - diskTypeUri := fmt.Sprintf(diskTypeUriTemplate, projectId, zone, diskType) + diskTypeUri := gceComputeAPIEndpoint + "projects/" + fmt.Sprintf(diskTypeUriTemplate, projectId, zone, diskType) expectedDescription := "{\"test-tag\":\"test-value\"}" /* Act */ diff --git a/pkg/cloudprovider/providers/gce/gce_instances.go b/pkg/cloudprovider/providers/gce/gce_instances.go index 94262134479..8d997ae9ed4 100644 --- a/pkg/cloudprovider/providers/gce/gce_instances.go +++ b/pkg/cloudprovider/providers/gce/gce_instances.go @@ -25,7 +25,7 @@ import ( "cloud.google.com/go/compute/metadata" "github.com/golang/glog" - computealpha "google.golang.org/api/compute/v0.beta" + computebeta 
"google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" "k8s.io/api/core/v1" @@ -69,7 +69,7 @@ func getZone(n *v1.Node) string { // ToInstanceReferences returns instance references by links func (gce *GCECloud) ToInstanceReferences(zone string, instanceNames []string) (refs []*compute.InstanceReference) { for _, ins := range instanceNames { - instanceLink := makeHostURL(gce.projectID, zone, ins) + instanceLink := makeHostURL(gce.service.BasePath, gce.projectID, zone, ins) refs = append(refs, &compute.InstanceReference{Instance: instanceLink}) } return refs @@ -303,7 +303,7 @@ func (gce *GCECloud) AliasRanges(nodeName types.NodeName) (cidrs []string, err e return } - var res *computealpha.Instance + var res *computebeta.Instance res, err = gce.serviceBeta.Instances.Get( gce.projectID, instance.Zone, instance.Name).Do() if err != nil { diff --git a/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go b/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go index 79bfb853436..952370252e3 100644 --- a/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go +++ b/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go @@ -483,7 +483,7 @@ func (gce *GCECloud) createTargetPool(name, serviceName, ipAddress, region, clus var instances []string for _, host := range hosts { - instances = append(instances, makeHostURL(gce.projectID, host.Zone, host.Name)) + instances = append(instances, makeHostURL(gce.service.BasePath, gce.projectID, host.Zone, host.Name)) } glog.Infof("Creating targetpool %v with %d healthchecks", name, len(hcLinks)) pool := &compute.TargetPool{ @@ -542,7 +542,7 @@ func (gce *GCECloud) updateTargetPool(loadBalancerName string, existing sets.Str } func (gce *GCECloud) targetPoolURL(name, region string) string { - return fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/regions/%s/targetPools/%s", gce.projectID, region, name) + return gce.service.BasePath + strings.Join([]string{gce.projectID, 
"regions", region, "targetPools", name}, "/") } func makeHttpHealthCheck(name, path string, port int32) *compute.HttpHealthCheck { @@ -671,10 +671,9 @@ func nodeNames(nodes []*v1.Node) []string { return ret } -func makeHostURL(projectID, zone, host string) string { +func makeHostURL(projectsApiEndpoint, projectID, zone, host string) string { host = canonicalizeInstanceName(host) - return fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s", - projectID, zone, host) + return projectsApiEndpoint + strings.Join([]string{projectID, "zones", zone, "instances", host}, "/") } func hostURLToComparablePath(hostURL string) string { @@ -922,8 +921,7 @@ func (gce *GCECloud) ensureStaticIP(name, serviceName, region, existingIP string addressObj.Address = existingIP } - address, err := gce.ReserveRegionAddress(addressObj, region) - if err != nil { + if err = gce.ReserveRegionAddress(addressObj, region); err != nil { if !isHTTPErrorCode(err, http.StatusConflict) { return "", false, fmt.Errorf("error creating gce static IP address: %v", err) } @@ -931,5 +929,10 @@ func (gce *GCECloud) ensureStaticIP(name, serviceName, region, existingIP string existed = true } - return address.Address, existed, nil + addr, err := gce.GetRegionAddress(name, region) + if err != nil { + return "", false, fmt.Errorf("error getting static IP address: %v", err) + } + + return addr.Address, existed, nil } diff --git a/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go b/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go index a663e9a44e7..dd3ba70f764 100644 --- a/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go +++ b/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go @@ -624,7 +624,7 @@ func getPortsAndProtocol(svcPorts []v1.ServicePort) (ports []string, protocol v1 } func (gce *GCECloud) getBackendServiceLink(name string) string { - return 
fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/regions/%s/backendServices/%s", gce.projectID, gce.region, name) + return gce.service.BasePath + strings.Join([]string{gce.projectID, "regions", gce.region, "backendServices", name}, "/") } func getNameFromLink(link string) string { diff --git a/pkg/cloudprovider/providers/gce/gce_routes.go b/pkg/cloudprovider/providers/gce/gce_routes.go index b20e3c82eb1..c5d3a37c245 100644 --- a/pkg/cloudprovider/providers/gce/gce_routes.go +++ b/pkg/cloudprovider/providers/gce/gce_routes.go @@ -102,7 +102,7 @@ func (gce *GCECloud) CreateRoute(clusterName string, nameHint string, route *clo }).Do() if err != nil { if isHTTPErrorCode(err, http.StatusConflict) { - glog.Info("Route %v already exists.") + glog.Infof("Route %v already exists.", routeName) return nil } else { return mc.Observe(err) diff --git a/pkg/cloudprovider/providers/gce/gce_zones.go b/pkg/cloudprovider/providers/gce/gce_zones.go index 881fe652fc1..3179e757892 100644 --- a/pkg/cloudprovider/providers/gce/gce_zones.go +++ b/pkg/cloudprovider/providers/gce/gce_zones.go @@ -23,6 +23,7 @@ import ( compute "google.golang.org/api/compute/v1" "k8s.io/kubernetes/pkg/cloudprovider" + "strings" ) func newZonesMetricContext(request, region string) *metricContext { @@ -52,5 +53,5 @@ func (gce *GCECloud) ListZonesInRegion(region string) ([]*compute.Zone, error) { } func (gce *GCECloud) getRegionLink(region string) string { - return fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%v/regions/%v", gce.projectID, region) + return gce.service.BasePath + strings.Join([]string{gce.projectID, "regions", region}, "/") } diff --git a/pkg/cloudprovider/providers/mesos/BUILD b/pkg/cloudprovider/providers/mesos/BUILD deleted file mode 100644 index 5f42a8e16a2..00000000000 --- a/pkg/cloudprovider/providers/mesos/BUILD +++ /dev/null @@ -1,67 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - 
"@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "client.go", - "config.go", - "mesos.go", - "plugins.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/cloudprovider:go_default_library", - "//pkg/controller:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/mesos/mesos-go/detector:go_default_library", - "//vendor/github.com/mesos/mesos-go/detector/zoo:go_default_library", - "//vendor/github.com/mesos/mesos-go/mesosproto:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/gopkg.in/gcfg.v1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "client_test.go", - "config_test.go", - "mesos_test.go", - ], - library = ":go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/cloudprovider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/mesos/mesos-go/detector:go_default_library", - "//vendor/github.com/mesos/mesos-go/mesosutil:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/pkg/cloudprovider/providers/mesos/client.go b/pkg/cloudprovider/providers/mesos/client.go deleted file mode 100644 index 04af97ee8fd..00000000000 --- a/pkg/cloudprovider/providers/mesos/client.go +++ 
/dev/null @@ -1,375 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mesos - -import ( - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "sync" - "time" - - log "github.com/golang/glog" - "github.com/mesos/mesos-go/detector" - mesos "github.com/mesos/mesos-go/mesosproto" - "golang.org/x/net/context" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - utilnet "k8s.io/apimachinery/pkg/util/net" -) - -const defaultClusterName = "mesos" - -var noLeadingMasterError = errors.New("there is no current leading master available to query") - -type mesosClient struct { - masterLock sync.RWMutex - master string // host:port formatted address - httpClient *http.Client - tr *http.Transport - initialMaster <-chan struct{} // signal chan, closes once an initial, non-nil master is found - state *stateCache -} - -type slaveNode struct { - hostname string - kubeletRunning bool - resources *v1.NodeResources -} - -type mesosState struct { - clusterName string - nodes map[string]*slaveNode // by hostname -} - -type stateCache struct { - sync.Mutex - expiresAt time.Time - cached *mesosState - err error - ttl time.Duration - refill func(context.Context) (*mesosState, error) -} - -// reloadCache reloads the state cache if it has expired. 
-func (c *stateCache) reloadCache(ctx context.Context) { - now := time.Now() - c.Lock() - defer c.Unlock() - if c.expiresAt.Before(now) { - log.V(4).Infof("Reloading cached Mesos state") - c.cached, c.err = c.refill(ctx) - c.expiresAt = now.Add(c.ttl) - } else { - log.V(4).Infof("Using cached Mesos state") - } -} - -// cachedState returns the cached Mesos state. -func (c *stateCache) cachedState(ctx context.Context) (*mesosState, error) { - c.reloadCache(ctx) - return c.cached, c.err -} - -// clusterName returns the cached Mesos cluster name. -func (c *stateCache) clusterName(ctx context.Context) (string, error) { - cached, err := c.cachedState(ctx) - if err != nil { - return "", err - } - return cached.clusterName, nil -} - -// nodes returns the cached list of slave nodes. -func (c *stateCache) nodes(ctx context.Context) (map[string]*slaveNode, error) { - cached, err := c.cachedState(ctx) - if err != nil { - return nil, err - } - return cached.nodes, nil -} - -func newMesosClient( - md detector.Master, - mesosHttpClientTimeout, stateCacheTTL time.Duration) (*mesosClient, error) { - - tr := utilnet.SetTransportDefaults(&http.Transport{}) - httpClient := &http.Client{ - Transport: tr, - Timeout: mesosHttpClientTimeout, - } - return createMesosClient(md, httpClient, tr, stateCacheTTL) -} - -func createMesosClient( - md detector.Master, - httpClient *http.Client, - tr *http.Transport, - stateCacheTTL time.Duration) (*mesosClient, error) { - - initialMaster := make(chan struct{}) - client := &mesosClient{ - httpClient: httpClient, - tr: tr, - initialMaster: initialMaster, - state: &stateCache{ - ttl: stateCacheTTL, - }, - } - client.state.refill = client.pollMasterForState - first := true - if err := md.Detect(detector.OnMasterChanged(func(info *mesos.MasterInfo) { - host, port := extractMasterAddress(info) - if len(host) > 0 { - client.masterLock.Lock() - defer client.masterLock.Unlock() - client.master = fmt.Sprintf("%s:%d", host, port) - if first { - first = false - 
close(initialMaster) - } - } - log.Infof("cloud master changed to '%v'", client.master) - })); err != nil { - log.V(1).Infof("detector initialization failed: %v", err) - return nil, err - } - return client, nil -} - -func extractMasterAddress(info *mesos.MasterInfo) (host string, port int) { - if info != nil { - host = info.GetAddress().GetHostname() - if host == "" { - host = info.GetAddress().GetIp() - } - - if host != "" { - // use port from Address - port = int(info.GetAddress().GetPort()) - } else { - // deprecated: get host and port directly from MasterInfo (and not Address) - host = info.GetHostname() - if host == "" { - host = unpackIPv4(info.GetIp()) - } - port = int(info.GetPort()) - } - } - return -} - -func unpackIPv4(ip uint32) string { - octets := make([]byte, 4, 4) - binary.BigEndian.PutUint32(octets, ip) - ipv4 := net.IP(octets) - return ipv4.String() -} - -// listSlaves returns a (possibly cached) map of slave nodes by hostname. -// Callers must not mutate the contents of the returned slice. -func (c *mesosClient) listSlaves(ctx context.Context) (map[string]*slaveNode, error) { - return c.state.nodes(ctx) -} - -// clusterName returns a (possibly cached) cluster name. -func (c *mesosClient) clusterName(ctx context.Context) (string, error) { - return c.state.clusterName(ctx) -} - -// pollMasterForState returns an array of slave nodes -func (c *mesosClient) pollMasterForState(ctx context.Context) (*mesosState, error) { - // wait for initial master detection - select { - case <-c.initialMaster: // noop - case <-ctx.Done(): - return nil, ctx.Err() - } - - master := func() string { - c.masterLock.RLock() - defer c.masterLock.RUnlock() - return c.master - }() - if master == "" { - return nil, noLeadingMasterError - } - - //TODO(jdef) should not assume master uses http (what about https?) 
- - var state *mesosState - successHandler := func(res *http.Response) error { - blob, err1 := ioutil.ReadAll(res.Body) - if err1 != nil { - return err1 - } - log.V(3).Infof("Got mesos state, content length %v", len(blob)) - state, err1 = parseMesosState(blob) - return err1 - } - // thinking here is that we may get some other status codes from mesos at some point: - // - authentication - // - redirection (possibly from http to https) - // ... - for _, tt := range []struct { - uri string - handlers map[int]func(*http.Response) error - }{ - { - uri: fmt.Sprintf("http://%s/state", master), - handlers: map[int]func(*http.Response) error{ - 200: successHandler, - }, - }, - { - uri: fmt.Sprintf("http://%s/state.json", master), - handlers: map[int]func(*http.Response) error{ - 200: successHandler, - }, - }, - } { - req, err := http.NewRequest("GET", tt.uri, nil) - if err != nil { - return nil, err - } - err = c.httpDo(ctx, req, func(res *http.Response, err error) error { - if err != nil { - return err - } - defer res.Body.Close() - if handler, ok := tt.handlers[res.StatusCode]; ok { - if err := handler(res); err != nil { - return err - } - } - // no handler for this error code, proceed to the next connection type - return nil - }) - if state != nil || err != nil { - return state, err - } - } - return nil, errors.New("failed to sync with Mesos master") -} - -func parseMesosState(blob []byte) (*mesosState, error) { - type State struct { - ClusterName string `json:"cluster"` - Slaves []*struct { - Id string `json:"id"` // ex: 20150106-162714-3815890698-5050-2453-S2 - Pid string `json:"pid"` // ex: slave(1)@10.22.211.18:5051 - Hostname string `json:"hostname"` // ex: 10.22.211.18, or slave-123.nowhere.com - Resources map[string]interface{} `json:"resources"` // ex: {"mem": 123, "ports": "[31000-3200]"} - } `json:"slaves"` - Frameworks []*struct { - Id string `json:"id"` // ex: 20151105-093752-3745622208-5050-1-0000 - Pid string `json:"pid"` // ex: 
scheduler(1)@192.168.65.228:57124 - Executors []*struct { - SlaveId string `json:"slave_id"` // ex: 20151105-093752-3745622208-5050-1-S1 - ExecutorId string `json:"executor_id"` // ex: 6704d375c68fee1e_k8sm-executor - Name string `json:"name"` // ex: Kubelet-Executor - } `json:"executors"` - } `json:"frameworks"` - } - - state := &State{ClusterName: defaultClusterName} - if err := json.Unmarshal(blob, state); err != nil { - return nil, err - } - - executorSlaveIds := map[string]struct{}{} - for _, f := range state.Frameworks { - for _, e := range f.Executors { - // Note that this simple comparison breaks when we support more than one - // k8s instance in a cluster. At the moment this is not possible for - // a number of reasons. - // TODO(sttts): find way to detect executors of this k8s instance - if e.Name == KubernetesExecutorName { - executorSlaveIds[e.SlaveId] = struct{}{} - } - } - } - - nodes := map[string]*slaveNode{} // by hostname - for _, slave := range state.Slaves { - if slave.Hostname == "" { - continue - } - node := &slaveNode{hostname: slave.Hostname} - cap := v1.ResourceList{} - if slave.Resources != nil && len(slave.Resources) > 0 { - // attempt to translate CPU (cores) and memory (MB) resources - if cpu, found := slave.Resources["cpus"]; found { - if cpuNum, ok := cpu.(float64); ok { - cap[v1.ResourceCPU] = *resource.NewQuantity(int64(cpuNum), resource.DecimalSI) - } else { - log.Warningf("unexpected slave cpu resource type %T: %v", cpu, cpu) - } - } else { - log.Warningf("slave failed to report cpu resource") - } - if mem, found := slave.Resources["mem"]; found { - if memNum, ok := mem.(float64); ok { - cap[v1.ResourceMemory] = *resource.NewQuantity(int64(memNum), resource.BinarySI) - } else { - log.Warningf("unexpected slave mem resource type %T: %v", mem, mem) - } - } else { - log.Warningf("slave failed to report mem resource") - } - } - if len(cap) > 0 { - node.resources = &v1.NodeResources{ - Capacity: cap, - } - log.V(4).Infof("node %q 
reporting capacity %v", node.hostname, cap) - } - if _, ok := executorSlaveIds[slave.Id]; ok { - node.kubeletRunning = true - } - nodes[node.hostname] = node - } - - result := &mesosState{ - clusterName: state.ClusterName, - nodes: nodes, - } - - return result, nil -} - -type responseHandler func(*http.Response, error) error - -// httpDo executes an HTTP request in the given context, canceling an ongoing request if the context -// is canceled prior to completion of the request. hacked from https://blog.golang.org/context -func (c *mesosClient) httpDo(ctx context.Context, req *http.Request, f responseHandler) error { - // Run the HTTP request in a goroutine and pass the response to f. - ch := make(chan error, 1) - go func() { ch <- f(c.httpClient.Do(req)) }() - select { - case <-ctx.Done(): - c.tr.CancelRequest(req) - <-ch // Wait for f to return. - return ctx.Err() - case err := <-ch: - return err - } -} diff --git a/pkg/cloudprovider/providers/mesos/client_test.go b/pkg/cloudprovider/providers/mesos/client_test.go deleted file mode 100644 index 8bcb983e11d..00000000000 --- a/pkg/cloudprovider/providers/mesos/client_test.go +++ /dev/null @@ -1,269 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mesos - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "reflect" - "testing" - "time" - - log "github.com/golang/glog" - "github.com/mesos/mesos-go/detector" - "github.com/mesos/mesos-go/mesosutil" - "golang.org/x/net/context" - - utilnet "k8s.io/apimachinery/pkg/util/net" -) - -// Test data - -const ( - TEST_MASTER_ID = "master-12345" - TEST_MASTER_IP = 177048842 // 10.141.141.10 - TEST_MASTER_PORT = 5050 - - TEST_STATE_JSON = ` - { - "version": "0.22.0", - "unregistered_frameworks": [], - "started_tasks": 0, - "start_time": 1429456501.61141, - "staged_tasks": 0, - "slaves": [ - { - "resources": { - "ports": "[31000-32000]", - "mem": 15360, - "disk": 470842, - "cpus": 8 - }, - "registered_time": 1429456502.46999, - "pid": "slave(1)@mesos1.internal.example.org.fail:5050", - "id": "20150419-081501-16777343-5050-16383-S2", - "hostname": "mesos1.internal.example.org.fail", - "attributes": {}, - "active": true - }, - { - "resources": { - "ports": "[31000-32000]", - "mem": 15360, - "disk": 470842, - "cpus": 8 - }, - "registered_time": 1429456502.4144, - "pid": "slave(1)@mesos2.internal.example.org.fail:5050", - "id": "20150419-081501-16777343-5050-16383-S1", - "hostname": "mesos2.internal.example.org.fail", - "attributes": {}, - "active": true - }, - { - "resources": { - "ports": "[31000-32000]", - "mem": 15360, - "disk": 470842, - "cpus": 8 - }, - "registered_time": 1429456502.02879, - "pid": "slave(1)@mesos3.internal.example.org.fail:5050", - "id": "20150419-081501-16777343-5050-16383-S0", - "hostname": "mesos3.internal.example.org.fail", - "attributes": {}, - "active": true - } - ], - "pid": "master@mesos-master0.internal.example.org.fail:5050", - "orphan_tasks": [], - "lost_tasks": 0, - "leader": "master@mesos-master0.internal.example.org.fail:5050", - "killed_tasks": 0, - "failed_tasks": 0, - "elected_time": 1429456501.61638, - "deactivated_slaves": 0, - "completed_frameworks": [], - "build_user": "buildbot", - "build_time": 
1425085311, - "build_date": "2015-02-27 17:01:51", - "activated_slaves": 3, - "finished_tasks": 0, - "flags": { - "zk_session_timeout": "10secs", - "work_dir": "/somepath/mesos/local/Lc9arz", - "webui_dir": "/usr/local/share/mesos/webui", - "version": "false", - "user_sorter": "drf", - "slave_reregister_timeout": "10mins", - "logbufsecs": "0", - "log_auto_initialize": "true", - "initialize_driver_logging": "true", - "framework_sorter": "drf", - "authenticators": "crammd5", - "authenticate_slaves": "false", - "authenticate": "false", - "allocation_interval": "1secs", - "logging_level": "INFO", - "quiet": "false", - "recovery_slave_removal_limit": "100%", - "registry": "replicated_log", - "registry_fetch_timeout": "1mins", - "registry_store_timeout": "5secs", - "registry_strict": "false", - "root_submissions": "true" - }, - "frameworks": [], - "git_branch": "refs/heads/0.22.0-rc1", - "git_sha": "46834faca67f877631e1beb7d61be5c080ec3dc2", - "git_tag": "0.22.0-rc1", - "hostname": "localhost", - "id": "20150419-081501-16777343-5050-16383" - }` -) - -// Mocks - -type FakeMasterDetector struct { - callback detector.MasterChanged - done chan struct{} -} - -func newFakeMasterDetector() *FakeMasterDetector { - return &FakeMasterDetector{ - done: make(chan struct{}), - } -} - -func (md FakeMasterDetector) Cancel() { - close(md.done) -} - -func (md FakeMasterDetector) Detect(cb detector.MasterChanged) error { - md.callback = cb - leadingMaster := mesosutil.NewMasterInfo(TEST_MASTER_ID, TEST_MASTER_IP, TEST_MASTER_PORT) - cb.OnMasterChanged(leadingMaster) - return nil -} - -func (md FakeMasterDetector) Done() <-chan struct{} { - return md.done -} - -// Auxiliary functions - -func makeHttpMocks() (*httptest.Server, *http.Client, *http.Transport) { - httpServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - log.V(4).Infof("Mocking response for HTTP request: %#v", r) - if r.URL.Path == "/state.json" { - w.WriteHeader(200) // OK - 
w.Header().Set("Content-Type", "application/json") - fmt.Fprintln(w, TEST_STATE_JSON) - } else { - w.WriteHeader(400) - fmt.Fprintln(w, "Bad Request") - } - })) - - // Intercept all client requests and feed them to the test server - transport := utilnet.SetTransportDefaults(&http.Transport{ - Proxy: func(req *http.Request) (*url.URL, error) { - return url.Parse(httpServer.URL) - }, - }) - - httpClient := &http.Client{Transport: transport} - - return httpServer, httpClient, transport -} - -// Tests - -// test mesos.parseMesosState -func Test_parseMesosState(t *testing.T) { - state, err := parseMesosState([]byte(TEST_STATE_JSON)) - - if err != nil { - t.Fatalf("parseMesosState does not yield an error") - } - if state == nil { - t.Fatalf("parseMesosState yields a non-nil state") - } - if len(state.nodes) != 3 { - t.Fatalf("parseMesosState yields a state with 3 nodes") - } -} - -// test mesos.listSlaves -func Test_listSlaves(t *testing.T) { - defer log.Flush() - md := FakeMasterDetector{} - httpServer, httpClient, httpTransport := makeHttpMocks() - defer httpServer.Close() - - cacheTTL := 500 * time.Millisecond - mesosClient, err := createMesosClient(md, httpClient, httpTransport, cacheTTL) - - if err != nil { - t.Fatalf("createMesosClient does not yield an error") - } - - slaveNodes, err := mesosClient.listSlaves(context.TODO()) - - if err != nil { - t.Fatalf("listSlaves does not yield an error") - } - if len(slaveNodes) != 3 { - t.Fatalf("listSlaves yields a collection of size 3") - } - - expectedHostnames := map[string]struct{}{ - "mesos1.internal.example.org.fail": {}, - "mesos2.internal.example.org.fail": {}, - "mesos3.internal.example.org.fail": {}, - } - - actualHostnames := make(map[string]struct{}) - for _, node := range slaveNodes { - actualHostnames[node.hostname] = struct{}{} - } - - if !reflect.DeepEqual(expectedHostnames, actualHostnames) { - t.Fatalf("listSlaves yields a collection with the expected hostnames") - } -} - -// test mesos.clusterName -func 
Test_clusterName(t *testing.T) { - defer log.Flush() - md := FakeMasterDetector{} - httpServer, httpClient, httpTransport := makeHttpMocks() - defer httpServer.Close() - cacheTTL := 500 * time.Millisecond - mesosClient, err := createMesosClient(md, httpClient, httpTransport, cacheTTL) - - name, err := mesosClient.clusterName(context.TODO()) - - if err != nil { - t.Fatalf("clusterName does not yield an error") - } - if name != defaultClusterName { - t.Fatalf("clusterName yields the expected (default) value") - } -} diff --git a/pkg/cloudprovider/providers/mesos/config.go b/pkg/cloudprovider/providers/mesos/config.go deleted file mode 100644 index 29d3bfdc4f5..00000000000 --- a/pkg/cloudprovider/providers/mesos/config.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mesos - -import ( - "io" - "time" - - "gopkg.in/gcfg.v1" -) - -const ( - DefaultMesosMaster = "localhost:5050" - DefaultHttpClientTimeout = time.Duration(10) * time.Second - DefaultStateCacheTTL = time.Duration(5) * time.Second -) - -// Example Mesos cloud provider configuration file: -// -// [mesos-cloud] -// mesos-master = leader.mesos:5050 -// http-client-timeout = 500ms -// state-cache-ttl = 1h - -type ConfigWrapper struct { - Mesos_Cloud Config -} - -type Config struct { - MesosMaster string `gcfg:"mesos-master"` - MesosHttpClientTimeout Duration `gcfg:"http-client-timeout"` - StateCacheTTL Duration `gcfg:"state-cache-ttl"` -} - -type Duration struct { - Duration time.Duration `gcfg:"duration"` -} - -func (d *Duration) UnmarshalText(data []byte) error { - underlying, err := time.ParseDuration(string(data)) - if err == nil { - d.Duration = underlying - } - return err -} - -func createDefaultConfig() *Config { - return &Config{ - MesosMaster: DefaultMesosMaster, - MesosHttpClientTimeout: Duration{Duration: DefaultHttpClientTimeout}, - StateCacheTTL: Duration{Duration: DefaultStateCacheTTL}, - } -} - -func readConfig(configReader io.Reader) (*Config, error) { - config := createDefaultConfig() - wrapper := &ConfigWrapper{Mesos_Cloud: *config} - if configReader != nil { - if err := gcfg.ReadInto(wrapper, configReader); err != nil { - return nil, err - } - config = &(wrapper.Mesos_Cloud) - } - return config, nil -} diff --git a/pkg/cloudprovider/providers/mesos/config_test.go b/pkg/cloudprovider/providers/mesos/config_test.go deleted file mode 100644 index 4637ab852b2..00000000000 --- a/pkg/cloudprovider/providers/mesos/config_test.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mesos - -import ( - "bytes" - "testing" - "time" - - log "github.com/golang/glog" -) - -// test mesos.createDefaultConfig -func Test_createDefaultConfig(t *testing.T) { - defer log.Flush() - - config := createDefaultConfig() - - if config.MesosMaster != DefaultMesosMaster { - t.Fatalf("Default config has the expected MesosMaster value") - } - - if config.MesosHttpClientTimeout.Duration != DefaultHttpClientTimeout { - t.Fatalf("Default config has the expected MesosHttpClientTimeout value") - } - - if config.StateCacheTTL.Duration != DefaultStateCacheTTL { - t.Fatalf("Default config has the expected StateCacheTTL value") - } -} - -// test mesos.readConfig -func Test_readConfig(t *testing.T) { - defer log.Flush() - - configString := ` -[mesos-cloud] - mesos-master = leader.mesos:5050 - http-client-timeout = 500ms - state-cache-ttl = 1h` - - reader := bytes.NewBufferString(configString) - - config, err := readConfig(reader) - - if err != nil { - t.Fatalf("Reading configuration does not yield an error: %#v", err) - } - - if config.MesosMaster != "leader.mesos:5050" { - t.Fatalf("Parsed config has the expected MesosMaster value") - } - - if config.MesosHttpClientTimeout.Duration != time.Duration(500)*time.Millisecond { - t.Fatalf("Parsed config has the expected MesosHttpClientTimeout value") - } - - if config.StateCacheTTL.Duration != time.Duration(1)*time.Hour { - t.Fatalf("Parsed config has the expected StateCacheTTL value") - } -} diff --git a/pkg/cloudprovider/providers/mesos/mesos.go b/pkg/cloudprovider/providers/mesos/mesos.go deleted file mode 100644 index 
e484be0d743..00000000000 --- a/pkg/cloudprovider/providers/mesos/mesos.go +++ /dev/null @@ -1,315 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mesos - -import ( - "errors" - "fmt" - "io" - "net" - "regexp" - - "golang.org/x/net/context" - - log "github.com/golang/glog" - "github.com/mesos/mesos-go/detector" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/controller" -) - -const ( - ProviderName = "mesos" - - // KubernetesExecutorName is shared between contrib/mesos and Mesos cloud provider. - // Because cloud provider -> contrib dependencies are forbidden, this constant - // is defined here, not in contrib. 
- KubernetesExecutorName = "Kubelet-Executor" -) - -var ( - CloudProvider *MesosCloud - - noHostNameSpecified = errors.New("No hostname specified") -) - -func init() { - cloudprovider.RegisterCloudProvider( - ProviderName, - func(configReader io.Reader) (cloudprovider.Interface, error) { - provider, err := newMesosCloud(configReader) - if err == nil { - CloudProvider = provider - } - return provider, err - }) -} - -type MesosCloud struct { - client *mesosClient - config *Config -} - -func (c *MesosCloud) MasterURI() string { - return c.config.MesosMaster -} - -func newMesosCloud(configReader io.Reader) (*MesosCloud, error) { - config, err := readConfig(configReader) - if err != nil { - return nil, err - } - - log.V(1).Infof("new mesos cloud, master='%v'", config.MesosMaster) - if d, err := detector.New(config.MesosMaster); err != nil { - log.V(1).Infof("failed to create master detector: %v", err) - return nil, err - } else if cl, err := newMesosClient(d, - config.MesosHttpClientTimeout.Duration, - config.StateCacheTTL.Duration); err != nil { - log.V(1).Infof("failed to create mesos cloud client: %v", err) - return nil, err - } else { - return &MesosCloud{client: cl, config: config}, nil - } -} - -// Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (c *MesosCloud) Initialize(clientBuilder controller.ControllerClientBuilder) {} - -// Implementation of Instances.CurrentNodeName -func (c *MesosCloud) CurrentNodeName(hostname string) (types.NodeName, error) { - return types.NodeName(hostname), nil -} - -func (c *MesosCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error { - return errors.New("unimplemented") -} - -// Instances returns a copy of the Mesos cloud Instances implementation. -// Mesos natively provides minimal cloud-type resources. More robust cloud -// support requires a combination of Mesos and cloud-specific knowledge. 
-func (c *MesosCloud) Instances() (cloudprovider.Instances, bool) { - return c, true -} - -// LoadBalancer always returns nil, false in this implementation. -// Mesos does not provide any type of native load balancing by default, -// so this implementation always returns (nil, false). -func (c *MesosCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { - return nil, false -} - -// Zones always returns nil, false in this implementation. -// Mesos does not provide any type of native region or zone awareness, -// so this implementation always returns (nil, false). -func (c *MesosCloud) Zones() (cloudprovider.Zones, bool) { - return nil, false -} - -// Clusters returns a copy of the Mesos cloud Clusters implementation. -// Mesos does not provide support for multiple clusters. -func (c *MesosCloud) Clusters() (cloudprovider.Clusters, bool) { - return c, true -} - -// Routes always returns nil, false in this implementation. -func (c *MesosCloud) Routes() (cloudprovider.Routes, bool) { - return nil, false -} - -// ProviderName returns the cloud provider ID. -func (c *MesosCloud) ProviderName() string { - return ProviderName -} - -// ScrubDNS filters DNS settings for pods. -func (c *MesosCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) { - return nameservers, searches -} - -// ListClusters lists the names of the available Mesos clusters. -func (c *MesosCloud) ListClusters() ([]string, error) { - // Always returns a single cluster (this one!) - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - name, err := c.client.clusterName(ctx) - return []string{name}, err -} - -// Master gets back the address (either DNS name or IP address) of the leading Mesos master node for the cluster. 
-func (c *MesosCloud) Master(clusterName string) (string, error) { - clusters, err := c.ListClusters() - if err != nil { - return "", err - } - for _, name := range clusters { - if name == clusterName { - if c.client.master == "" { - return "", errors.New("The currently leading master is unknown.") - } - - host, _, err := net.SplitHostPort(c.client.master) - if err != nil { - return "", err - } - - return host, nil - } - } - return "", fmt.Errorf("The supplied cluster '%v' does not exist", clusterName) -} - -// ipAddress returns an IP address of the specified instance. -func ipAddress(name string) (net.IP, error) { - if name == "" { - return nil, noHostNameSpecified - } - ipaddr := net.ParseIP(name) - if ipaddr != nil { - return ipaddr, nil - } - iplist, err := net.LookupIP(name) - if err != nil { - log.V(2).Infof("failed to resolve IP from host name '%v': %v", name, err) - return nil, err - } - ipaddr = iplist[0] - log.V(2).Infof("resolved host '%v' to '%v'", name, ipaddr) - return ipaddr, nil -} - -// mapNodeNameToPrivateDNSName maps a k8s NodeName to an mesos hostname. -// This is a simple string cast -func mapNodeNameToHostname(nodeName types.NodeName) string { - return string(nodeName) -} - -// ExternalID returns the cloud provider ID of the instance with the specified nodeName (deprecated). -func (c *MesosCloud) ExternalID(nodeName types.NodeName) (string, error) { - hostname := mapNodeNameToHostname(nodeName) - //TODO(jdef) use a timeout here? 15s? - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - nodes, err := c.client.listSlaves(ctx) - if err != nil { - return "", err - } - - node := nodes[hostname] - if node == nil { - return "", cloudprovider.InstanceNotFound - } - - ip, err := ipAddress(node.hostname) - if err != nil { - return "", err - } - return ip.String(), nil -} - -// InstanceID returns the cloud provider ID of the instance with the specified nodeName. 
-func (c *MesosCloud) InstanceID(nodeName types.NodeName) (string, error) { - return "", nil -} - -// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID -// This method will not be called from the node that is requesting this ID. i.e. metadata service -// and other local methods cannot be used here -func (c *MesosCloud) InstanceTypeByProviderID(providerID string) (string, error) { - return "", errors.New("unimplemented") -} - -// InstanceType returns the type of the instance with the specified nodeName. -func (c *MesosCloud) InstanceType(nodeName types.NodeName) (string, error) { - return "", nil -} - -func (c *MesosCloud) listNodes() (map[string]*slaveNode, error) { - //TODO(jdef) use a timeout here? 15s? - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - nodes, err := c.client.listSlaves(ctx) - if err != nil { - return nil, err - } - if len(nodes) == 0 { - log.V(2).Info("no slaves found, are any running?") - return nil, nil - } - return nodes, nil -} - -// List lists instances that match 'filter' which is a regular expression -// which must match the entire instance name (fqdn). -func (c *MesosCloud) List(filter string) ([]types.NodeName, error) { - nodes, err := c.listNodes() - if err != nil { - return nil, err - } - filterRegex, err := regexp.Compile(filter) - if err != nil { - return nil, err - } - names := []types.NodeName{} - for _, node := range nodes { - if filterRegex.MatchString(node.hostname) { - names = append(names, types.NodeName(node.hostname)) - } - } - return names, nil -} - -// ListWithKubelet list those instance which have no running kubelet, i.e. the -// Kubernetes executor. 
-func (c *MesosCloud) ListWithoutKubelet() ([]string, error) { - nodes, err := c.listNodes() - if err != nil { - return nil, err - } - addr := make([]string, 0, len(nodes)) - for _, n := range nodes { - if !n.kubeletRunning { - addr = append(addr, n.hostname) - } - } - return addr, nil -} - -// NodeAddresses returns the addresses of the instance with the specified nodeName. -func (c *MesosCloud) NodeAddresses(nodeName types.NodeName) ([]v1.NodeAddress, error) { - name := mapNodeNameToHostname(nodeName) - ip, err := ipAddress(name) - if err != nil { - return nil, err - } - return []v1.NodeAddress{ - {Type: v1.NodeInternalIP, Address: ip.String()}, - {Type: v1.NodeExternalIP, Address: ip.String()}, - }, nil -} - -// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID -// This method will not be called from the node that is requesting this ID. i.e. metadata service -// and other local methods cannot be used here -func (c *MesosCloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) { - return []v1.NodeAddress{}, errors.New("unimplemented") -} diff --git a/pkg/cloudprovider/providers/mesos/mesos_test.go b/pkg/cloudprovider/providers/mesos/mesos_test.go deleted file mode 100644 index cd80fdba95c..00000000000 --- a/pkg/cloudprovider/providers/mesos/mesos_test.go +++ /dev/null @@ -1,280 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mesos - -import ( - "bytes" - "net" - "reflect" - "testing" - "time" - - log "github.com/golang/glog" - "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" -) - -func TestIPAddress(t *testing.T) { - expected4 := net.IPv4(127, 0, 0, 1) - ip, err := ipAddress("127.0.0.1") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if !reflect.DeepEqual(ip, expected4) { - t.Fatalf("expected %#v instead of %#v", expected4, ip) - } - - expected6 := net.ParseIP("::1") - if expected6 == nil { - t.Fatalf("failed to parse ipv6 ::1") - } - ip, err = ipAddress("::1") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if !reflect.DeepEqual(ip, expected6) { - t.Fatalf("expected %#v instead of %#v", expected6, ip) - } - - ip, err = ipAddress("localhost") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if !reflect.DeepEqual(ip, expected4) && !reflect.DeepEqual(ip, expected6) { - t.Fatalf("expected %#v or %#v instead of %#v", expected4, expected6, ip) - } - - _, err = ipAddress("") - if err != noHostNameSpecified { - t.Fatalf("expected error noHostNameSpecified but got none") - } -} - -// test mesos.newMesosCloud with no config -func Test_newMesosCloud_NoConfig(t *testing.T) { - defer log.Flush() - mesosCloud, err := newMesosCloud(nil) - - if err != nil { - t.Fatalf("Creating a new Mesos cloud provider without config does not yield an error: %#v", err) - } - - if mesosCloud.client.httpClient.Timeout != DefaultHttpClientTimeout { - t.Fatalf("Creating a new Mesos cloud provider without config does not yield an error: %#v", err) - } - - if mesosCloud.client.state.ttl != DefaultStateCacheTTL { - t.Fatalf("Mesos client with default config has the expected state cache TTL value") - } -} - -// test mesos.newMesosCloud with custom config -func Test_newMesosCloud_WithConfig(t *testing.T) { - defer log.Flush() - - configString := ` -[mesos-cloud] - http-client-timeout = 500ms - state-cache-ttl = 1h` - - reader := 
bytes.NewBufferString(configString) - - mesosCloud, err := newMesosCloud(reader) - - if err != nil { - t.Fatalf("Creating a new Mesos cloud provider with a custom config does not yield an error: %#v", err) - } - - if mesosCloud.client.httpClient.Timeout != time.Duration(500)*time.Millisecond { - t.Fatalf("Mesos client with a custom config has the expected HTTP client timeout value") - } - - if mesosCloud.client.state.ttl != time.Duration(1)*time.Hour { - t.Fatalf("Mesos client with a custom config has the expected state cache TTL value") - } -} - -// tests for capability reporting functions - -// test mesos.Instances -func Test_Instances(t *testing.T) { - defer log.Flush() - mesosCloud, _ := newMesosCloud(nil) - - instances, supports_instances := mesosCloud.Instances() - - if !supports_instances || instances == nil { - t.Fatalf("MesosCloud provides an implementation of Instances") - } -} - -// test mesos.LoadBalancer -func Test_TcpLoadBalancer(t *testing.T) { - defer log.Flush() - mesosCloud, _ := newMesosCloud(nil) - - lb, supports_lb := mesosCloud.LoadBalancer() - - if supports_lb || lb != nil { - t.Fatalf("MesosCloud does not provide an implementation of LoadBalancer") - } -} - -// test mesos.Zones -func Test_Zones(t *testing.T) { - defer log.Flush() - mesosCloud, _ := newMesosCloud(nil) - - zones, supports_zones := mesosCloud.Zones() - - if supports_zones || zones != nil { - t.Fatalf("MesosCloud does not provide an implementation of Zones") - } -} - -// test mesos.Clusters -func Test_Clusters(t *testing.T) { - defer log.Flush() - mesosCloud, _ := newMesosCloud(nil) - - clusters, supports_clusters := mesosCloud.Clusters() - - if !supports_clusters || clusters == nil { - t.Fatalf("MesosCloud does not provide an implementation of Clusters") - } -} - -// test mesos.MasterURI -func Test_MasterURI(t *testing.T) { - defer log.Flush() - mesosCloud, _ := newMesosCloud(nil) - - uri := mesosCloud.MasterURI() - - if uri != DefaultMesosMaster { - t.Fatalf("MasterURI returns 
the expected master URI (expected \"localhost\", actual \"%s\"", uri) - } -} - -// test mesos.ListClusters -func Test_ListClusters(t *testing.T) { - defer log.Flush() - md := FakeMasterDetector{} - httpServer, httpClient, httpTransport := makeHttpMocks() - defer httpServer.Close() - cacheTTL := 500 * time.Millisecond - mesosClient, err := createMesosClient(md, httpClient, httpTransport, cacheTTL) - mesosCloud := &MesosCloud{client: mesosClient, config: createDefaultConfig()} - - clusters, err := mesosCloud.ListClusters() - - if err != nil { - t.Fatalf("ListClusters does not yield an error: %#v", err) - } - - if len(clusters) != 1 { - t.Fatalf("ListClusters should return a list of size 1: (actual: %#v)", clusters) - } - - expectedClusterNames := []string{"mesos"} - - if !reflect.DeepEqual(clusters, expectedClusterNames) { - t.Fatalf("ListClusters should return the expected list of names: (expected: %#v, actual: %#v)", - expectedClusterNames, - clusters) - } -} - -// test mesos.Master -func Test_Master(t *testing.T) { - defer log.Flush() - md := FakeMasterDetector{} - httpServer, httpClient, httpTransport := makeHttpMocks() - defer httpServer.Close() - cacheTTL := 500 * time.Millisecond - mesosClient, err := createMesosClient(md, httpClient, httpTransport, cacheTTL) - mesosCloud := &MesosCloud{client: mesosClient, config: createDefaultConfig()} - - clusters, err := mesosCloud.ListClusters() - clusterName := clusters[0] - master, err := mesosCloud.Master(clusterName) - - if err != nil { - t.Fatalf("Master does not yield an error: %#v", err) - } - - expectedMaster := unpackIPv4(TEST_MASTER_IP) - - if master != expectedMaster { - t.Fatalf("Master returns the unexpected value: (expected: %#v, actual: %#v", expectedMaster, master) - } -} - -// test mesos.List -func Test_List(t *testing.T) { - defer log.Flush() - md := FakeMasterDetector{} - httpServer, httpClient, httpTransport := makeHttpMocks() - defer httpServer.Close() - cacheTTL := 500 * time.Millisecond - 
mesosClient, err := createMesosClient(md, httpClient, httpTransport, cacheTTL) - mesosCloud := &MesosCloud{client: mesosClient, config: createDefaultConfig()} - - clusters, err := mesosCloud.List(".*") // recognizes the language of all strings - - if err != nil { - t.Fatalf("List does not yield an error: %#v", err) - } - - if len(clusters) != 3 { - t.Fatalf("List with a catch-all filter should return a list of size 3: (actual: %#v)", clusters) - } - - clusters, err = mesosCloud.List("$^") // end-of-string followed by start-of-string: recognizes the empty language - - if err != nil { - t.Fatalf("List does not yield an error: %#v", err) - } - - if len(clusters) != 0 { - t.Fatalf("List with a reject-all filter should return a list of size 0: (actual: %#v)", clusters) - } -} - -func Test_ExternalID(t *testing.T) { - defer log.Flush() - md := FakeMasterDetector{} - httpServer, httpClient, httpTransport := makeHttpMocks() - defer httpServer.Close() - cacheTTL := 500 * time.Millisecond - mesosClient, err := createMesosClient(md, httpClient, httpTransport, cacheTTL) - mesosCloud := &MesosCloud{client: mesosClient, config: createDefaultConfig()} - - _, err = mesosCloud.ExternalID("unknown") - if err != cloudprovider.InstanceNotFound { - t.Fatalf("ExternalID did not return InstanceNotFound on an unknown instance") - } - - slaveName := types.NodeName("mesos3.internal.example.org.fail") - id, err := mesosCloud.ExternalID(slaveName) - if id != "" { - t.Fatalf("ExternalID should not be able to resolve %q", slaveName) - } - if err == cloudprovider.InstanceNotFound { - t.Fatalf("ExternalID should find %q", slaveName) - } -} diff --git a/pkg/cloudprovider/providers/openstack/OWNERS b/pkg/cloudprovider/providers/openstack/OWNERS index 7f6c5023bcc..a9ec9e985dd 100644 --- a/pkg/cloudprovider/providers/openstack/OWNERS +++ b/pkg/cloudprovider/providers/openstack/OWNERS @@ -1,6 +1,8 @@ approvers: - anguslees - NickrenREN +- dims reviewers: - anguslees - NickrenREN +- dims \ No newline 
at end of file diff --git a/pkg/cloudprovider/providers/openstack/openstack.go b/pkg/cloudprovider/providers/openstack/openstack.go index 31f913426ea..7f9ebc78049 100644 --- a/pkg/cloudprovider/providers/openstack/openstack.go +++ b/pkg/cloudprovider/providers/openstack/openstack.go @@ -75,10 +75,10 @@ type LoadBalancer struct { } type LoadBalancerOpts struct { - LBVersion string `gcfg:"lb-version"` // overrides autodetection. v1 or v2 - SubnetId string `gcfg:"subnet-id"` // required - FloatingNetworkId string `gcfg:"floating-network-id"` - LBMethod string `gcfg:"lb-method"` + LBVersion string `gcfg:"lb-version"` // overrides autodetection. v1 or v2 + SubnetId string `gcfg:"subnet-id"` // required + FloatingNetworkId string `gcfg:"floating-network-id"` // If specified, will create floating ip for loadbalancer, or do not create floating ip. + LBMethod string `gcfg:"lb-method"` // default to ROUND_ROBIN. CreateMonitor bool `gcfg:"create-monitor"` MonitorDelay MyDuration `gcfg:"monitor-delay"` MonitorTimeout MyDuration `gcfg:"monitor-timeout"` @@ -216,6 +216,40 @@ func readInstanceID() (string, error) { return md.Uuid, nil } +// check opts for OpenStack +func checkOpenStackOpts(openstackOpts *OpenStack) error { + lbOpts := openstackOpts.lbOpts + + // subnet-id is required + if len(lbOpts.SubnetId) == 0 { + return fmt.Errorf("subnet-id not set in cloud provider config") + } + + // if need to create health monitor for Neutron LB, + // monitor-delay, monitor-timeout and monitor-max-retries should be set. 
+ emptyDuration := MyDuration{} + if lbOpts.CreateMonitor { + if lbOpts.MonitorDelay == emptyDuration { + return fmt.Errorf("monitor-delay not set in cloud provider config") + } + if lbOpts.MonitorTimeout == emptyDuration { + return fmt.Errorf("monitor-timeout not set in cloud provider config") + } + if lbOpts.MonitorMaxRetries == uint(0) { + return fmt.Errorf("monitor-max-retries not set in cloud provider config") + } + } + + // if enable ManageSecurityGroups, node-security-group should be set. + if lbOpts.ManageSecurityGroups { + if len(lbOpts.NodeSecurityGroupID) == 0 { + return fmt.Errorf("node-security-group not set in cloud provider config") + } + } + + return nil +} + func newOpenStack(cfg Config) (*OpenStack, error) { provider, err := openstack.NewClient(cfg.Global.AuthUrl) if err != nil { @@ -260,6 +294,11 @@ func newOpenStack(cfg Config) (*OpenStack, error) { localInstanceID: id, } + err = checkOpenStackOpts(&os) + if err != nil { + return nil, err + } + return &os, nil } diff --git a/pkg/cloudprovider/providers/openstack/openstack_instances.go b/pkg/cloudprovider/providers/openstack/openstack_instances.go index fd94eacfbc8..d75024c39e5 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_instances.go +++ b/pkg/cloudprovider/providers/openstack/openstack_instances.go @@ -18,6 +18,8 @@ package openstack import ( "errors" + "fmt" + "net/url" "github.com/golang/glog" "github.com/gophercloud/gophercloud" @@ -107,7 +109,24 @@ func (i *Instances) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) // This method will not be called from the node that is requesting this ID. i.e. 
metadata service // and other local methods cannot be used here func (i *Instances) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) { - return []v1.NodeAddress{}, errors.New("unimplemented") + instanceID, err := instanceIDFromProviderID(providerID) + + if err != nil { + return []v1.NodeAddress{}, err + } + + server, err := servers.Get(i.compute, instanceID).Extract() + + if err != nil { + return []v1.NodeAddress{}, err + } + + addresses, err := nodeAddresses(server) + if err != nil { + return []v1.NodeAddress{}, err + } + + return addresses, nil } // ExternalID returns the cloud provider ID of the specified instance (deprecated). @@ -142,10 +161,56 @@ func (i *Instances) InstanceID(name types.NodeName) (string, error) { // This method will not be called from the node that is requesting this ID. i.e. metadata service // and other local methods cannot be used here func (i *Instances) InstanceTypeByProviderID(providerID string) (string, error) { - return "", errors.New("unimplemented") + instanceID, err := instanceIDFromProviderID(providerID) + + if err != nil { + return "", err + } + + server, err := servers.Get(i.compute, instanceID).Extract() + + if err != nil { + return "", err + } + + return srvInstanceType(server) } // InstanceType returns the type of the specified instance. 
func (i *Instances) InstanceType(name types.NodeName) (string, error) { - return "", nil + srv, err := getServerByName(i.compute, name) + + if err != nil { + return "", err + } + + return srvInstanceType(srv) +} + +func srvInstanceType(srv *servers.Server) (string, error) { + val, ok := srv.Flavor["name"] + + if !ok { + return "", fmt.Errorf("flavor name not present in server info") + } + + flavor, ok := val.(string) + + if !ok { + return "", fmt.Errorf("flavor name is not a string") + } + + return flavor, nil +} + +func instanceIDFromProviderID(providerID string) (instanceID string, err error) { + parsedID, err := url.Parse(providerID) + if err != nil { + return "", err + } + if parsedID.Scheme != ProviderName { + return "", fmt.Errorf("unrecognized provider %q", parsedID.Scheme) + } + + return parsedID.Host, nil } diff --git a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go index 413e7750288..d1d2f583aa2 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go +++ b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go @@ -546,15 +546,6 @@ func (lbaas *LbaasV2) createLoadBalancer(service *v1.Service, name string) (*loa return loadbalancer, nil } -func stringInArray(x string, list []string) bool { - for _, y := range list { - if y == x { - return true - } - } - return false -} - func (lbaas *LbaasV2) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) { loadBalancerName := cloudprovider.GetLoadBalancerName(service) loadbalancer, err := getLoadbalancerByName(lbaas.network, loadBalancerName) @@ -620,7 +611,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv return nil, fmt.Errorf("Source range restrictions are not supported for openstack load balancers without managing security groups") } - affinity := v1.ServiceAffinityNone + affinity := apiService.Spec.SessionAffinity var 
persistence *v2pools.SessionPersistence switch affinity { case v1.ServiceAffinityNone: @@ -763,9 +754,13 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv } waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) monitorID = monitor.ID + } else if lbaas.opts.CreateMonitor == false { + glog.V(4).Infof("Do not create monitor for pool %s when create-monitor is false", pool.ID) } - glog.V(4).Infof("Monitor for pool %s: %s", pool.ID, monitorID) + if monitorID != "" { + glog.V(4).Infof("Monitor for pool %s: %s", pool.ID, monitorID) + } } // All remaining listeners are obsolete, delete @@ -1106,7 +1101,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1. return fmt.Errorf("Error getting pool for listener %s: %v", listener.ID, err) } poolIDs = append(poolIDs, pool.ID) - monitorIDs = append(monitorIDs, pool.MonitorID) + // If create-monitor of cloud-config is false, pool has not monitor. + if pool.MonitorID != "" { + monitorIDs = append(monitorIDs, pool.MonitorID) + } } // get all members associated with each poolIDs @@ -1287,7 +1285,7 @@ func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *v1.Service LBMethod: lbmethod, }).Extract() if err != nil { - return nil, err + return nil, fmt.Errorf("Error creating pool for openstack load balancer %s: %v", name, err) } for _, node := range nodes { @@ -1302,8 +1300,8 @@ func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *v1.Service Address: addr, }).Extract() if err != nil { - pools.Delete(lb.network, pool.ID) - return nil, err + return nil, fmt.Errorf("Error creating member for the pool(%s) of openstack load balancer %s: %v", + pool.ID, name, err) } } @@ -1316,15 +1314,13 @@ func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *v1.Service MaxRetries: int(lb.opts.MonitorMaxRetries), }).Extract() if err != nil { - pools.Delete(lb.network, pool.ID) - return nil, err + return nil, fmt.Errorf("Error 
creating monitor for openstack load balancer %s: %v", name, err) } _, err = pools.AssociateMonitor(lb.network, pool.ID, mon.ID).Extract() if err != nil { - monitors.Delete(lb.network, mon.ID) - pools.Delete(lb.network, pool.ID) - return nil, err + return nil, fmt.Errorf("Error associating monitor(%s) with pool(%s) for"+ + "openstack load balancer %s: %v", mon.ID, pool.ID, name, err) } } @@ -1345,11 +1341,7 @@ func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *v1.Service vip, err := vips.Create(lb.network, createOpts).Extract() if err != nil { - if mon != nil { - monitors.Delete(lb.network, mon.ID) - } - pools.Delete(lb.network, pool.ID) - return nil, err + return nil, fmt.Errorf("Error creating vip for openstack load balancer %s: %v", name, err) } status := &v1.LoadBalancerStatus{} @@ -1363,7 +1355,7 @@ func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *v1.Service } floatIP, err := floatingips.Create(lb.network, floatIPOpts).Extract() if err != nil { - return nil, err + return nil, fmt.Errorf("Error creating floatingip for openstack load balancer %s: %v", name, err) } status.Ingress = append(status.Ingress, v1.LoadBalancerIngress{IP: floatIP.FloatingIP}) diff --git a/pkg/cloudprovider/providers/openstack/openstack_routes.go b/pkg/cloudprovider/providers/openstack/openstack_routes.go index 28e5160a599..2cb0cd95ff0 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_routes.go +++ b/pkg/cloudprovider/providers/openstack/openstack_routes.go @@ -262,8 +262,8 @@ func (r *Routes) DeleteRoute(clusterName string, route *cloudprovider.Route) err if index != -1 { // Delete element `index` - addr_pairs[index] = addr_pairs[len(routes)-1] - addr_pairs = addr_pairs[:len(routes)-1] + addr_pairs[index] = addr_pairs[len(addr_pairs)-1] + addr_pairs = addr_pairs[:len(addr_pairs)-1] unwind, err := updateAllowedAddressPairs(r.network, &port, addr_pairs) if err != nil { diff --git a/pkg/cloudprovider/providers/openstack/openstack_test.go 
b/pkg/cloudprovider/providers/openstack/openstack_test.go index 6ac40884e6e..1471cb262e9 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_test.go +++ b/pkg/cloudprovider/providers/openstack/openstack_test.go @@ -18,9 +18,6 @@ package openstack import ( "fmt" - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions" - "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" "os" "reflect" "sort" @@ -28,7 +25,11 @@ import ( "testing" "time" + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/wait" @@ -144,6 +145,100 @@ func TestToAuthOptions(t *testing.T) { } } +func TestCheckOpenStackOpts(t *testing.T) { + delay := MyDuration{60 * time.Second} + timeout := MyDuration{30 * time.Second} + tests := []struct { + name string + openstackOpts *OpenStack + expectedError error + }{ + { + name: "test1", + openstackOpts: &OpenStack{ + provider: nil, + lbOpts: LoadBalancerOpts{ + LBVersion: "v2", + SubnetId: "6261548e-ffde-4bc7-bd22-59c83578c5ef", + FloatingNetworkId: "38b8b5f9-64dc-4424-bf86-679595714786", + LBMethod: "ROUND_ROBIN", + CreateMonitor: true, + MonitorDelay: delay, + MonitorTimeout: timeout, + MonitorMaxRetries: uint(3), + ManageSecurityGroups: true, + NodeSecurityGroupID: "b41d28c2-d02f-4e1e-8ffb-23b8e4f5c144", + }, + }, + expectedError: nil, + }, + { + name: "test2", + openstackOpts: &OpenStack{ + provider: nil, + lbOpts: LoadBalancerOpts{ + LBVersion: "v2", + FloatingNetworkId: "38b8b5f9-64dc-4424-bf86-679595714786", + LBMethod: "ROUND_ROBIN", + CreateMonitor: true, + MonitorDelay: delay, + MonitorTimeout: timeout, + MonitorMaxRetries: uint(3), + ManageSecurityGroups: true, + NodeSecurityGroupID: 
"b41d28c2-d02f-4e1e-8ffb-23b8e4f5c144", + }, + }, + expectedError: fmt.Errorf("subnet-id not set in cloud provider config"), + }, + { + name: "test3", + openstackOpts: &OpenStack{ + provider: nil, + lbOpts: LoadBalancerOpts{ + LBVersion: "v2", + SubnetId: "6261548e-ffde-4bc7-bd22-59c83578c5ef", + FloatingNetworkId: "38b8b5f9-64dc-4424-bf86-679595714786", + LBMethod: "ROUND_ROBIN", + CreateMonitor: true, + ManageSecurityGroups: true, + NodeSecurityGroupID: "b41d28c2-d02f-4e1e-8ffb-23b8e4f5c144", + }, + }, + expectedError: fmt.Errorf("monitor-delay not set in cloud provider config"), + }, + { + name: "test4", + openstackOpts: &OpenStack{ + provider: nil, + lbOpts: LoadBalancerOpts{ + LBVersion: "v2", + SubnetId: "6261548e-ffde-4bc7-bd22-59c83578c5ef", + FloatingNetworkId: "38b8b5f9-64dc-4424-bf86-679595714786", + LBMethod: "ROUND_ROBIN", + CreateMonitor: true, + MonitorDelay: delay, + MonitorTimeout: timeout, + MonitorMaxRetries: uint(3), + ManageSecurityGroups: true, + }, + }, + expectedError: fmt.Errorf("node-security-group not set in cloud provider config"), + }, + } + + for _, testcase := range tests { + err := checkOpenStackOpts(testcase.openstackOpts) + + if err == nil && testcase.expectedError == nil { + continue + } + if (err != nil && testcase.expectedError == nil) || (err == nil && testcase.expectedError != nil) || err.Error() != testcase.expectedError.Error() { + t.Errorf("%s failed: expected err=%q, got %q", + testcase.name, testcase.expectedError, err) + } + } +} + func TestCaller(t *testing.T) { called := false myFunc := func() { called = true } @@ -449,3 +544,42 @@ func TestCinderAutoDetectApiVersion(t *testing.T) { } } } + +func TestInstanceIDFromProviderID(t *testing.T) { + testCases := []struct { + providerID string + instanceID string + fail bool + }{ + { + providerID: "openstack://7b9cf879-7146-417c-abfd-cb4272f0c935", + instanceID: "7b9cf879-7146-417c-abfd-cb4272f0c935", + fail: false, + }, + { + providerID: 
"7b9cf879-7146-417c-abfd-cb4272f0c935", + instanceID: "", + fail: true, + }, + { + providerID: "other-provider://7b9cf879-7146-417c-abfd-cb4272f0c935", + instanceID: "", + fail: true, + }, + } + + for _, test := range testCases { + instanceID, err := instanceIDFromProviderID(test.providerID) + if (err != nil) != test.fail { + t.Errorf("%s yielded `err != nil` as %t. expected %t", test.providerID, (err != nil), test.fail) + } + + if test.fail { + continue + } + + if instanceID != test.instanceID { + t.Errorf("%s yielded %s. expected %s", test.providerID, instanceID, test.instanceID) + } + } +} diff --git a/pkg/cloudprovider/providers/photon/photon.go b/pkg/cloudprovider/providers/photon/photon.go index b52f11e9f56..8ef0b1dcfcc 100644 --- a/pkg/cloudprovider/providers/photon/photon.go +++ b/pkg/cloudprovider/providers/photon/photon.go @@ -187,7 +187,7 @@ func getVMIDbyIP(pc *PCCloud, IPAddress string) (string, error) { } else { task, err = photonClient.Tasks.Wait(task.ID) if err != nil { - glog.Warning("Photon Cloud Provider: Wait task for GetNetworks failed for vm.ID %s, error [%v]", vm.ID, err) + glog.Warningf("Photon Cloud Provider: Wait task for GetNetworks failed for vm.ID %s, error [%v]", vm.ID, err) } else { networkConnections := task.ResourceProperties.(map[string]interface{}) networks := networkConnections["networkConnections"].([]interface{}) @@ -258,7 +258,7 @@ func getPhotonClient(pc *PCCloud) (*photon.Client, error) { glog.Errorf("Photon Cloud Provider: new client creation failed. 
Error[%v]", err) return nil, err } - glog.V(2).Info("Photon Cloud Provider: Status of the new photon controller client: %v", status) + glog.V(2).Infof("Photon Cloud Provider: Status of the new photon controller client: %v", status) return pc.photonClient, nil } diff --git a/pkg/cloudprovider/providers/providers.go b/pkg/cloudprovider/providers/providers.go index 4bc1572c900..89b9e6d2246 100644 --- a/pkg/cloudprovider/providers/providers.go +++ b/pkg/cloudprovider/providers/providers.go @@ -22,7 +22,6 @@ import ( _ "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" _ "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack" _ "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" - _ "k8s.io/kubernetes/pkg/cloudprovider/providers/mesos" _ "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack" _ "k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt" _ "k8s.io/kubernetes/pkg/cloudprovider/providers/photon" diff --git a/pkg/cloudprovider/providers/vsphere/vsphere.go b/pkg/cloudprovider/providers/vsphere/vsphere.go index 1f6e68d560e..fe2ff0acbd8 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -580,7 +580,8 @@ func (vs *VSphere) NodeAddresses(nodeName k8stypes.NodeName) ([]v1.NodeAddress, // This method will not be called from the node that is requesting this ID. i.e. metadata service // and other local methods cannot be used here func (vs *VSphere) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) { - return []v1.NodeAddress{}, errors.New("unimplemented") + vmName := path.Base(providerID) + return vs.NodeAddresses(vmNameToNodeName(vmName)) } func (vs *VSphere) AddSSHKeyToAllInstances(user string, keyData []byte) error { @@ -663,7 +664,7 @@ func (vs *VSphere) InstanceID(nodeName k8stypes.NodeName) (string, error) { // This method will not be called from the node that is requesting this ID. i.e. 
metadata service // and other local methods cannot be used here func (vs *VSphere) InstanceTypeByProviderID(providerID string) (string, error) { - return "", errors.New("unimplemented") + return "", nil } func (vs *VSphere) InstanceType(name k8stypes.NodeName) (string, error) { diff --git a/pkg/controller/OWNERS b/pkg/controller/OWNERS index 56f54dddc8e..6b09a34b804 100644 --- a/pkg/controller/OWNERS +++ b/pkg/controller/OWNERS @@ -1,5 +1,4 @@ approvers: -- bprashanth - deads2k - derekwaynecarr - mikedanese diff --git a/pkg/controller/controller_ref_manager.go b/pkg/controller/controller_ref_manager.go index 3ef7ade5c7d..64d6b8c0a62 100644 --- a/pkg/controller/controller_ref_manager.go +++ b/pkg/controller/controller_ref_manager.go @@ -44,31 +44,31 @@ func GetControllerOf(controllee metav1.Object) *metav1.OwnerReference { return nil } -type baseControllerRefManager struct { - controller metav1.Object - selector labels.Selector +type BaseControllerRefManager struct { + Controller metav1.Object + Selector labels.Selector canAdoptErr error canAdoptOnce sync.Once - canAdoptFunc func() error + CanAdoptFunc func() error } -func (m *baseControllerRefManager) canAdopt() error { +func (m *BaseControllerRefManager) CanAdopt() error { m.canAdoptOnce.Do(func() { - if m.canAdoptFunc != nil { - m.canAdoptErr = m.canAdoptFunc() + if m.CanAdoptFunc != nil { + m.canAdoptErr = m.CanAdoptFunc() } }) return m.canAdoptErr } -// claimObject tries to take ownership of an object for this controller. +// ClaimObject tries to take ownership of an object for this controller. // // It will reconcile the following: // * Adopt orphans if the match function returns true. // * Release owned objects if the match function returns false. // -// A non-nil error is returned if some form of reconciliation was attemped and +// A non-nil error is returned if some form of reconciliation was attempted and // failed. Usually, controllers should try again later in case reconciliation // is still needed. 
// @@ -77,10 +77,10 @@ func (m *baseControllerRefManager) canAdopt() error { // own the object. // // No reconciliation will be attempted if the controller is being deleted. -func (m *baseControllerRefManager) claimObject(obj metav1.Object, match func(metav1.Object) bool, adopt, release func(metav1.Object) error) (bool, error) { +func (m *BaseControllerRefManager) ClaimObject(obj metav1.Object, match func(metav1.Object) bool, adopt, release func(metav1.Object) error) (bool, error) { controllerRef := GetControllerOf(obj) if controllerRef != nil { - if controllerRef.UID != m.controller.GetUID() { + if controllerRef.UID != m.Controller.GetUID() { // Owned by someone else. Ignore. return false, nil } @@ -93,7 +93,7 @@ func (m *baseControllerRefManager) claimObject(obj metav1.Object, match func(met } // Owned by us but selector doesn't match. // Try to release, unless we're being deleted. - if m.controller.GetDeletionTimestamp() != nil { + if m.Controller.GetDeletionTimestamp() != nil { return false, nil } if err := release(obj); err != nil { @@ -110,7 +110,7 @@ func (m *baseControllerRefManager) claimObject(obj metav1.Object, match func(met } // It's an orphan. - if m.controller.GetDeletionTimestamp() != nil || !match(obj) { + if m.Controller.GetDeletionTimestamp() != nil || !match(obj) { // Ignore if we're being deleted or selector doesn't match. return false, nil } @@ -133,7 +133,7 @@ func (m *baseControllerRefManager) claimObject(obj metav1.Object, match func(met } type PodControllerRefManager struct { - baseControllerRefManager + BaseControllerRefManager controllerKind schema.GroupVersionKind podControl PodControlInterface } @@ -141,14 +141,14 @@ type PodControllerRefManager struct { // NewPodControllerRefManager returns a PodControllerRefManager that exposes // methods to manage the controllerRef of pods. 
// -// The canAdopt() function can be used to perform a potentially expensive check +// The CanAdopt() function can be used to perform a potentially expensive check // (such as a live GET from the API server) prior to the first adoption. // It will only be called (at most once) if an adoption is actually attempted. -// If canAdopt() returns a non-nil error, all adoptions will fail. +// If CanAdopt() returns a non-nil error, all adoptions will fail. // -// NOTE: Once canAdopt() is called, it will not be called again by the same +// NOTE: Once CanAdopt() is called, it will not be called again by the same // PodControllerRefManager instance. Create a new instance if it makes -// sense to check canAdopt() again (e.g. in a different sync pass). +// sense to check CanAdopt() again (e.g. in a different sync pass). func NewPodControllerRefManager( podControl PodControlInterface, controller metav1.Object, @@ -157,10 +157,10 @@ func NewPodControllerRefManager( canAdopt func() error, ) *PodControllerRefManager { return &PodControllerRefManager{ - baseControllerRefManager: baseControllerRefManager{ - controller: controller, - selector: selector, - canAdoptFunc: canAdopt, + BaseControllerRefManager: BaseControllerRefManager{ + Controller: controller, + Selector: selector, + CanAdoptFunc: canAdopt, }, controllerKind: controllerKind, podControl: podControl, @@ -176,7 +176,7 @@ func NewPodControllerRefManager( // Optional: If one or more filters are specified, a Pod will only be claimed if // all filters return true. // -// A non-nil error is returned if some form of reconciliation was attemped and +// A non-nil error is returned if some form of reconciliation was attempted and // failed. Usually, controllers should try again later in case reconciliation // is still needed. // @@ -189,7 +189,7 @@ func (m *PodControllerRefManager) ClaimPods(pods []*v1.Pod, filters ...func(*v1. 
match := func(obj metav1.Object) bool { pod := obj.(*v1.Pod) // Check selector first so filters only run on potentially matching Pods. - if !m.selector.Matches(labels.Set(pod.Labels)) { + if !m.Selector.Matches(labels.Set(pod.Labels)) { return false } for _, filter := range filters { @@ -207,7 +207,7 @@ func (m *PodControllerRefManager) ClaimPods(pods []*v1.Pod, filters ...func(*v1. } for _, pod := range pods { - ok, err := m.claimObject(pod, match, adopt, release) + ok, err := m.ClaimObject(pod, match, adopt, release) if err != nil { errlist = append(errlist, err) continue @@ -222,7 +222,7 @@ func (m *PodControllerRefManager) ClaimPods(pods []*v1.Pod, filters ...func(*v1. // AdoptPod sends a patch to take control of the pod. It returns the error if // the patching fails. func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error { - if err := m.canAdopt(); err != nil { + if err := m.CanAdopt(); err != nil { return fmt.Errorf("can't adopt Pod %v/%v (%v): %v", pod.Namespace, pod.Name, pod.UID, err) } // Note that ValidateOwnerReferences() will reject this patch if another @@ -230,7 +230,7 @@ func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error { addControllerPatch := fmt.Sprintf( `{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`, m.controllerKind.GroupVersion(), m.controllerKind.Kind, - m.controller.GetName(), m.controller.GetUID(), pod.UID) + m.Controller.GetName(), m.Controller.GetUID(), pod.UID) return m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(addControllerPatch)) } @@ -238,8 +238,8 @@ func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error { // It returns the error if the patching fails. 404 and 422 errors are ignored. 
func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error { glog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s", - pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.controller.GetName()) - deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.controller.GetUID(), pod.UID) + pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) + deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), pod.UID) err := m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(deleteOwnerRefPatch)) if err != nil { if errors.IsNotFound(err) { @@ -267,7 +267,7 @@ func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error { // categories and accordingly adopt or release them. See comments on these functions // for more details. type ReplicaSetControllerRefManager struct { - baseControllerRefManager + BaseControllerRefManager controllerKind schema.GroupVersionKind rsControl RSControlInterface } @@ -275,14 +275,14 @@ type ReplicaSetControllerRefManager struct { // NewReplicaSetControllerRefManager returns a ReplicaSetControllerRefManager that exposes // methods to manage the controllerRef of ReplicaSets. // -// The canAdopt() function can be used to perform a potentially expensive check +// The CanAdopt() function can be used to perform a potentially expensive check // (such as a live GET from the API server) prior to the first adoption. // It will only be called (at most once) if an adoption is actually attempted. -// If canAdopt() returns a non-nil error, all adoptions will fail. +// If CanAdopt() returns a non-nil error, all adoptions will fail. 
// -// NOTE: Once canAdopt() is called, it will not be called again by the same +// NOTE: Once CanAdopt() is called, it will not be called again by the same // ReplicaSetControllerRefManager instance. Create a new instance if it -// makes sense to check canAdopt() again (e.g. in a different sync pass). +// makes sense to check CanAdopt() again (e.g. in a different sync pass). func NewReplicaSetControllerRefManager( rsControl RSControlInterface, controller metav1.Object, @@ -291,10 +291,10 @@ func NewReplicaSetControllerRefManager( canAdopt func() error, ) *ReplicaSetControllerRefManager { return &ReplicaSetControllerRefManager{ - baseControllerRefManager: baseControllerRefManager{ - controller: controller, - selector: selector, - canAdoptFunc: canAdopt, + BaseControllerRefManager: BaseControllerRefManager{ + Controller: controller, + Selector: selector, + CanAdoptFunc: canAdopt, }, controllerKind: controllerKind, rsControl: rsControl, @@ -307,7 +307,7 @@ func NewReplicaSetControllerRefManager( // * Adopt orphans if the selector matches. // * Release owned objects if the selector no longer matches. // -// A non-nil error is returned if some form of reconciliation was attemped and +// A non-nil error is returned if some form of reconciliation was attempted and // failed. Usually, controllers should try again later in case reconciliation // is still needed. 
// @@ -319,7 +319,7 @@ func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.Rep var errlist []error match := func(obj metav1.Object) bool { - return m.selector.Matches(labels.Set(obj.GetLabels())) + return m.Selector.Matches(labels.Set(obj.GetLabels())) } adopt := func(obj metav1.Object) error { return m.AdoptReplicaSet(obj.(*extensions.ReplicaSet)) @@ -329,7 +329,7 @@ func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.Rep } for _, rs := range sets { - ok, err := m.claimObject(rs, match, adopt, release) + ok, err := m.ClaimObject(rs, match, adopt, release) if err != nil { errlist = append(errlist, err) continue @@ -344,7 +344,7 @@ func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.Rep // AdoptReplicaSet sends a patch to take control of the ReplicaSet. It returns // the error if the patching fails. func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *extensions.ReplicaSet) error { - if err := m.canAdopt(); err != nil { + if err := m.CanAdopt(); err != nil { return fmt.Errorf("can't adopt ReplicaSet %v/%v (%v): %v", rs.Namespace, rs.Name, rs.UID, err) } // Note that ValidateOwnerReferences() will reject this patch if another @@ -352,7 +352,7 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *extensions.ReplicaS addControllerPatch := fmt.Sprintf( `{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`, m.controllerKind.GroupVersion(), m.controllerKind.Kind, - m.controller.GetName(), m.controller.GetUID(), rs.UID) + m.Controller.GetName(), m.Controller.GetUID(), rs.UID) return m.rsControl.PatchReplicaSet(rs.Namespace, rs.Name, []byte(addControllerPatch)) } @@ -360,8 +360,8 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *extensions.ReplicaS // It returns the error if the patching fails. 404 and 422 errors are ignored. 
func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *extensions.ReplicaSet) error { glog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s", - replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.controller.GetName()) - deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.controller.GetUID(), replicaSet.UID) + replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) + deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), replicaSet.UID) err := m.rsControl.PatchReplicaSet(replicaSet.Namespace, replicaSet.Name, []byte(deleteOwnerRefPatch)) if err != nil { if errors.IsNotFound(err) { @@ -379,9 +379,9 @@ func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *extension return err } -// RecheckDeletionTimestamp returns a canAdopt() function to recheck deletion. +// RecheckDeletionTimestamp returns a CanAdopt() function to recheck deletion. // -// The canAdopt() function calls getObject() to fetch the latest value, +// The CanAdopt() function calls getObject() to fetch the latest value, // and denies adoption attempts if that object has a non-nil DeletionTimestamp. func RecheckDeletionTimestamp(getObject func() (metav1.Object, error)) func() error { return func() error { @@ -402,7 +402,7 @@ func RecheckDeletionTimestamp(getObject func() (metav1.Object, error)) func() er // categories and accordingly adopt or release them. See comments on these functions // for more details. 
type ControllerRevisionControllerRefManager struct { - baseControllerRefManager + BaseControllerRefManager controllerKind schema.GroupVersionKind crControl ControllerRevisionControlInterface } @@ -426,10 +426,10 @@ func NewControllerRevisionControllerRefManager( canAdopt func() error, ) *ControllerRevisionControllerRefManager { return &ControllerRevisionControllerRefManager{ - baseControllerRefManager: baseControllerRefManager{ - controller: controller, - selector: selector, - canAdoptFunc: canAdopt, + BaseControllerRefManager: BaseControllerRefManager{ + Controller: controller, + Selector: selector, + CanAdoptFunc: canAdopt, }, controllerKind: controllerKind, crControl: crControl, @@ -454,7 +454,7 @@ func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histor var errlist []error match := func(obj metav1.Object) bool { - return m.selector.Matches(labels.Set(obj.GetLabels())) + return m.Selector.Matches(labels.Set(obj.GetLabels())) } adopt := func(obj metav1.Object) error { return m.AdoptControllerRevision(obj.(*appsv1beta1.ControllerRevision)) @@ -464,7 +464,7 @@ func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histor } for _, h := range histories { - ok, err := m.claimObject(h, match, adopt, release) + ok, err := m.ClaimObject(h, match, adopt, release) if err != nil { errlist = append(errlist, err) continue @@ -479,7 +479,7 @@ func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histor // AdoptControllerRevision sends a patch to take control of the ControllerRevision. It returns the error if // the patching fails. 
func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *appsv1beta1.ControllerRevision) error { - if err := m.canAdopt(); err != nil { + if err := m.CanAdopt(); err != nil { return fmt.Errorf("can't adopt ControllerRevision %v/%v (%v): %v", history.Namespace, history.Name, history.UID, err) } // Note that ValidateOwnerReferences() will reject this patch if another @@ -487,7 +487,7 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history addControllerPatch := fmt.Sprintf( `{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`, m.controllerKind.GroupVersion(), m.controllerKind.Kind, - m.controller.GetName(), m.controller.GetUID(), history.UID) + m.Controller.GetName(), m.Controller.GetUID(), history.UID) return m.crControl.PatchControllerRevision(history.Namespace, history.Name, []byte(addControllerPatch)) } @@ -495,8 +495,8 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history // It returns the error if the patching fails. 404 and 422 errors are ignored. 
func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *appsv1beta1.ControllerRevision) error { glog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s", - history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.controller.GetName()) - deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.controller.GetUID(), history.UID) + history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) + deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), history.UID) err := m.crControl.PatchControllerRevision(history.Namespace, history.Name, []byte(deleteOwnerRefPatch)) if err != nil { if errors.IsNotFound(err) { diff --git a/pkg/controller/daemon/daemoncontroller.go b/pkg/controller/daemon/daemoncontroller.go index c45e17ab30a..529b7f60c40 100644 --- a/pkg/controller/daemon/daemoncontroller.go +++ b/pkg/controller/daemon/daemoncontroller.go @@ -1022,28 +1022,53 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error { return dsc.updateDaemonSetStatus(ds, hash) } -// hasIntentionalPredicatesReasons checks if any of the given predicate failure reasons -// is intentional. -func hasIntentionalPredicatesReasons(reasons []algorithm.PredicateFailureReason) bool { - for _, r := range reasons { - switch reason := r.(type) { - case *predicates.PredicateFailureError: - switch reason { - // intentional - case - predicates.ErrNodeSelectorNotMatch, - predicates.ErrPodNotMatchHostName, - predicates.ErrNodeLabelPresenceViolated, - // this one is probably intentional since it's a workaround for not having - // pod hard anti affinity. 
- predicates.ErrPodNotFitsHostPorts, - // DaemonSet is expected to respect taints and tolerations - predicates.ErrTaintsTolerationsNotMatch: - return true - } - } +func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *extensions.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulercache.NodeInfo, error) { + // DaemonSet pods shouldn't be deleted by NodeController in case of node problems. + // Add infinite toleration for taint notReady:NoExecute here + // to survive taint-based eviction enforced by NodeController + // when node turns not ready. + v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{ + Key: algorithm.TaintNodeNotReady, + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoExecute, + }) + + // DaemonSet pods shouldn't be deleted by NodeController in case of node problems. + // Add infinite toleration for taint unreachable:NoExecute here + // to survive taint-based eviction enforced by NodeController + // when node turns unreachable. + v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{ + Key: algorithm.TaintNodeUnreachable, + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoExecute, + }) + + pods := []*v1.Pod{} + + podList, err := dsc.podLister.List(labels.Everything()) + if err != nil { + return nil, nil, err } - return false + for _, pod := range podList { + if pod.Spec.NodeName != node.Name { + continue + } + if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed { + continue + } + // ignore pods that belong to the daemonset when taking into account whether + // a daemonset should bind to a node. + if controllerRef := controller.GetControllerOf(pod); controllerRef != nil && controllerRef.UID == ds.UID { + continue + } + pods = append(pods, pod) + } + + nodeInfo := schedulercache.NewNodeInfo(pods...) 
+ nodeInfo.SetNode(node) + + _, reasons, err := Predicates(newPod, nodeInfo) + return reasons, nodeInfo, err } // nodeShouldRunDaemonPod checks a set of preconditions against a (node,daemonset) and returns a @@ -1086,71 +1111,43 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten } } - // DaemonSet pods shouldn't be deleted by NodeController in case of node problems. - // Add infinite toleration for taint notReady:NoExecute here - // to survive taint-based eviction enforced by NodeController - // when node turns not ready. - v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{ - Key: algorithm.TaintNodeNotReady, - Operator: v1.TolerationOpExists, - Effect: v1.TaintEffectNoExecute, - }) - - // DaemonSet pods shouldn't be deleted by NodeController in case of node problems. - // Add infinite toleration for taint unreachable:NoExecute here - // to survive taint-based eviction enforced by NodeController - // when node turns unreachable. - v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{ - Key: algorithm.TaintNodeUnreachable, - Operator: v1.TolerationOpExists, - Effect: v1.TaintEffectNoExecute, - }) - - pods := []*v1.Pod{} - - podList, err := dsc.podLister.List(labels.Everything()) - if err != nil { - return false, false, false, err - } - for _, pod := range podList { - if pod.Spec.NodeName != node.Name { - continue - } - if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed { - continue - } - // ignore pods that belong to the daemonset when taking into account whether - // a daemonset should bind to a node. - if controllerRef := controller.GetControllerOf(pod); controllerRef != nil && controllerRef.UID == ds.UID { - continue - } - pods = append(pods, pod) - } - - nodeInfo := schedulercache.NewNodeInfo(pods...) 
- nodeInfo.SetNode(node) - _, reasons, err := Predicates(newPod, nodeInfo) + reasons, nodeInfo, err := dsc.simulate(newPod, node, ds) if err != nil { glog.Warningf("DaemonSet Predicates failed on node %s for ds '%s/%s' due to unexpected error: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, err) return false, false, false, err } - // Return directly if there is any intentional predicate failure reason, so that daemonset controller skips - // checking other predicate failures, such as InsufficientResourceError and unintentional errors. - if hasIntentionalPredicatesReasons(reasons) { - return false, false, false, nil - } + var insufficientResourceErr error for _, r := range reasons { glog.V(4).Infof("DaemonSet Predicates failed on node %s for ds '%s/%s' for reason: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason()) switch reason := r.(type) { case *predicates.InsufficientResourceError: - dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedPlacementReason, "failed to place pod on %q: %s", node.ObjectMeta.Name, reason.Error()) - shouldSchedule = false + insufficientResourceErr = reason case *predicates.PredicateFailureError: var emitEvent bool + // we try to partition predicates into two partitions here: intentional on the part of the operator and not. switch reason { - // unintentional predicates reasons need to be fired out to event. + // intentional + case + predicates.ErrNodeSelectorNotMatch, + predicates.ErrPodNotMatchHostName, + predicates.ErrNodeLabelPresenceViolated, + // this one is probably intentional since it's a workaround for not having + // pod hard anti affinity. 
+ predicates.ErrPodNotFitsHostPorts: + return false, false, false, nil + case predicates.ErrTaintsTolerationsNotMatch: + // DaemonSet is expected to respect taints and tolerations + fitsNoExecute, _, err := predicates.PodToleratesNodeNoExecuteTaints(newPod, nil, nodeInfo) + if err != nil { + return false, false, false, err + } + if !fitsNoExecute { + return false, false, false, nil + } + wantToRun, shouldSchedule = false, false + // unintentional case predicates.ErrDiskConflict, predicates.ErrVolumeZoneConflict, @@ -1178,6 +1175,12 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten } } } + // only emit this event if insufficient resource is the only thing + // preventing the daemon pod from scheduling + if shouldSchedule && insufficientResourceErr != nil { + dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedPlacementReason, "failed to place pod on %q: %s", node.ObjectMeta.Name, insufficientResourceErr.Error()) + shouldSchedule = false + } return } diff --git a/pkg/controller/daemon/daemoncontroller_test.go b/pkg/controller/daemon/daemoncontroller_test.go index 5a853ccd575..e6a17b474be 100644 --- a/pkg/controller/daemon/daemoncontroller_test.go +++ b/pkg/controller/daemon/daemoncontroller_test.go @@ -60,6 +60,7 @@ var ( var ( noScheduleTolerations = []v1.Toleration{{Key: "dedicated", Value: "user1", Effect: "NoSchedule"}} noScheduleTaints = []v1.Taint{{Key: "dedicated", Value: "user1", Effect: "NoSchedule"}} + noExecuteTaints = []v1.Taint{{Key: "dedicated", Value: "user1", Effect: "NoExecute"}} ) var ( @@ -291,7 +292,7 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController, clientset := fake.NewSimpleClientset(initialObjects...) 
informerFactory := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) - manager := NewDaemonSetsController( + dsc := NewDaemonSetsController( informerFactory.Extensions().V1beta1().DaemonSets(), informerFactory.Apps().V1beta1().ControllerRevisions(), informerFactory.Core().V1().Pods(), @@ -300,18 +301,18 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController, ) fakeRecorder := record.NewFakeRecorder(100) - manager.eventRecorder = fakeRecorder + dsc.eventRecorder = fakeRecorder - manager.podStoreSynced = alwaysReady - manager.nodeStoreSynced = alwaysReady - manager.dsStoreSynced = alwaysReady - manager.historyStoreSynced = alwaysReady + dsc.podStoreSynced = alwaysReady + dsc.nodeStoreSynced = alwaysReady + dsc.dsStoreSynced = alwaysReady + dsc.historyStoreSynced = alwaysReady podControl := newFakePodControl() - manager.podControl = podControl + dsc.podControl = podControl podControl.podStore = informerFactory.Core().V1().Pods().Informer().GetStore() return &daemonSetsController{ - manager, + dsc, informerFactory.Extensions().V1beta1().DaemonSets().Informer().GetStore(), informerFactory.Apps().V1beta1().ControllerRevisions().Informer().GetStore(), informerFactory.Core().V1().Pods().Informer().GetStore(), @@ -320,13 +321,16 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController, }, podControl, clientset } -func validateSyncDaemonSets(t *testing.T, fakePodControl *fakePodControl, expectedCreates, expectedDeletes int) { +func validateSyncDaemonSets(t *testing.T, manager *daemonSetsController, fakePodControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int) { if len(fakePodControl.Templates) != expectedCreates { t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", expectedCreates, len(fakePodControl.Templates)) } if len(fakePodControl.DeletePodName) != expectedDeletes { t.Errorf("Unexpected number of deletes. 
Expected %d, saw %d\n", expectedDeletes, len(fakePodControl.DeletePodName)) } + if len(manager.fakeRecorder.Events) != expectedEvents { + t.Errorf("Unexpected number of events. Expected %d, saw %d\n", expectedEvents, len(manager.fakeRecorder.Events)) + } // Every Pod created should have a ControllerRef. if got, want := len(fakePodControl.ControllerRefs), expectedCreates; got != want { t.Errorf("len(ControllerRefs) = %v, want %v", got, want) @@ -345,13 +349,13 @@ func validateSyncDaemonSets(t *testing.T, fakePodControl *fakePodControl, expect } } -func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int) { +func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int) { key, err := controller.KeyFunc(ds) if err != nil { t.Errorf("Could not get key for daemon.") } manager.syncHandler(key) - validateSyncDaemonSets(t, podControl, expectedCreates, expectedDeletes) + validateSyncDaemonSets(t, manager, podControl, expectedCreates, expectedDeletes, expectedEvents) } // clearExpectations copies the FakePodControl to PodStore and clears the create and delete expectations. 
@@ -402,7 +406,7 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) { manager, podControl, _ := newTestController(ds) addNodes(manager.nodeStore, 0, 5, nil) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0) } } @@ -425,7 +429,7 @@ func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) { manager.dsStore.Add(ds) addNodes(manager.nodeStore, 0, 5, nil) - syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0) // Make sure the single sync() updated Status already for the change made // during the manage() phase. @@ -442,7 +446,7 @@ func TestNoNodesDoesNothing(t *testing.T) { ds := newDaemonSet("foo") ds.Spec.UpdateStrategy = *strategy manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } @@ -455,7 +459,7 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) { manager, podControl, _ := newTestController(ds) manager.nodeStore.Add(newNode("only-node", nil)) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } @@ -471,7 +475,7 @@ func TestNotReadNodeDaemonDoesNotLaunchPod(t *testing.T) { } manager.nodeStore.Add(node) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } @@ -485,7 +489,7 @@ func TestOutOfDiskNodeDaemonDoesNotLaunchPod(t *testing.T) { node.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}} manager.nodeStore.Add(node) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } @@ -533,7 +537,14 @@ func 
TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) { Spec: podSpec, }) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + switch strategy.Type { + case extensions.OnDeleteDaemonSetStrategyType: + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2) + case extensions.RollingUpdateDaemonSetStrategyType: + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3) + default: + t.Fatalf("unexpected UpdateStrategy %+v", strategy) + } } } @@ -553,7 +564,14 @@ func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T) Spec: podSpec, }) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + switch strategy.Type { + case extensions.OnDeleteDaemonSetStrategyType: + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2) + case extensions.RollingUpdateDaemonSetStrategyType: + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3) + default: + t.Fatalf("unexpected UpdateStrategy %+v", strategy) + } } } @@ -571,7 +589,7 @@ func TestInsufficientCapacityNodeSufficientCapacityWithNodeLabelDaemonLaunchPod( manager.nodeStore.Add(node1) manager.nodeStore.Add(node2) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) // we do not expect any event for insufficient free resource if len(manager.fakeRecorder.Events) != 0 { t.Fatalf("unexpected events, got %v, expected %v: %+v", len(manager.fakeRecorder.Events), 0, manager.fakeRecorder.Events) @@ -593,7 +611,7 @@ func TestSufficientCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) { Status: v1.PodStatus{Phase: v1.PodSucceeded}, }) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 1) } } @@ -612,7 +630,7 @@ func TestSufficientCapacityNodeDaemonLaunchesPod(t *testing.T) { Spec: podSpec, }) 
manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 1) } } @@ -630,7 +648,7 @@ func TestNetworkUnavailableNodeDaemonLaunchesPod(t *testing.T) { manager.nodeStore.Add(node) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } @@ -651,7 +669,7 @@ func TestDontDoAnythingIfBeingDeleted(t *testing.T) { Spec: podSpec, }) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } @@ -674,7 +692,7 @@ func TestDontDoAnythingIfBeingDeletedRace(t *testing.T) { pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) manager.podStore.Add(pod) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } @@ -700,7 +718,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) { ds.Spec.UpdateStrategy = *strategy ds.Spec.Template.Spec = podSpec manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } @@ -727,7 +745,7 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) { manager.dsStore.Add(ds) pod := newPod(ds.Name+"-", node.Name, simpleDaemonSetLabel, ds) manager.podStore.Add(pod) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } @@ -760,7 +778,7 @@ func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) { Spec: podSpec1, }) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } @@ -798,7 +816,7 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) { }) manager.dsStore.Add(ds) - 
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 1) } } @@ -814,7 +832,7 @@ func TestDealsWithExistingPods(t *testing.T) { addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 2) addPods(manager.podStore, "node-3", simpleDaemonSetLabel, ds, 5) addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, ds, 2) - syncAndValidateDaemonSets(t, manager, ds, podControl, 2, 5) + syncAndValidateDaemonSets(t, manager, ds, podControl, 2, 5, 0) } } @@ -828,7 +846,7 @@ func TestSelectorDaemonLaunchesPods(t *testing.T) { addNodes(manager.nodeStore, 0, 4, nil) addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) manager.dsStore.Add(daemon) - syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0) + syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0, 0) } } @@ -846,7 +864,7 @@ func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) { addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 3) addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 1) addPods(manager.podStore, "node-4", simpleDaemonSetLabel, ds, 1) - syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 4) + syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 4, 0) } } @@ -868,7 +886,7 @@ func TestSelectorDaemonDealsWithExistingPods(t *testing.T) { addPods(manager.podStore, "node-7", simpleDaemonSetLabel2, ds, 4) addPods(manager.podStore, "node-9", simpleDaemonSetLabel, ds, 1) addPods(manager.podStore, "node-9", simpleDaemonSetLabel2, ds, 1) - syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 20) + syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 20, 0) } } @@ -882,7 +900,7 @@ func TestBadSelectorDaemonDoesNothing(t *testing.T) { ds.Spec.UpdateStrategy = *strategy ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel2 manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } @@ -895,7 
+913,7 @@ func TestNameDaemonSetLaunchesPods(t *testing.T) { manager, podControl, _ := newTestController(ds) addNodes(manager.nodeStore, 0, 5, nil) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } @@ -908,7 +926,7 @@ func TestBadNameDaemonSetDoesNothing(t *testing.T) { manager, podControl, _ := newTestController(ds) addNodes(manager.nodeStore, 0, 5, nil) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } @@ -923,7 +941,7 @@ func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) { addNodes(manager.nodeStore, 0, 4, nil) addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } @@ -938,7 +956,7 @@ func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) { addNodes(manager.nodeStore, 0, 4, nil) addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } @@ -950,7 +968,7 @@ func TestSelectorDaemonSetLaunchesPods(t *testing.T) { addNodes(manager.nodeStore, 0, 4, nil) addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 0, 0) } // Daemon with node affinity should launch pods on nodes matching affinity. 
@@ -980,7 +998,7 @@ func TestNodeAffinityDaemonLaunchesPods(t *testing.T) { addNodes(manager.nodeStore, 0, 4, nil) addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) manager.dsStore.Add(daemon) - syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0) + syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0, 0) } } @@ -1004,7 +1022,7 @@ func TestNumberReadyStatus(t *testing.T) { addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) if updated.Status.NumberReady != 0 { t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status) } @@ -1016,7 +1034,7 @@ func TestNumberReadyStatus(t *testing.T) { pod.Status.Conditions = append(pod.Status.Conditions, condition) } - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) if updated.Status.NumberReady != 2 { t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status) } @@ -1044,7 +1062,7 @@ func TestObservedGeneration(t *testing.T) { addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) if updated.Status.ObservedGeneration != ds.Generation { t.Errorf("Wrong ObservedGeneration for daemon %s in status. Expected %d, got %d", updated.Name, ds.Generation, updated.Status.ObservedGeneration) } @@ -1054,14 +1072,14 @@ func TestObservedGeneration(t *testing.T) { // DaemonSet controller should kill all failed pods and create at most 1 pod on every node. 
func TestDaemonKillFailedPods(t *testing.T) { tests := []struct { - numFailedPods, numNormalPods, expectedCreates, expectedDeletes int - test string + numFailedPods, numNormalPods, expectedCreates, expectedDeletes, expectedEvents int + test string }{ - {numFailedPods: 0, numNormalPods: 1, expectedCreates: 0, expectedDeletes: 0, test: "normal (do nothing)"}, - {numFailedPods: 0, numNormalPods: 0, expectedCreates: 1, expectedDeletes: 0, test: "no pods (create 1)"}, - {numFailedPods: 1, numNormalPods: 0, expectedCreates: 0, expectedDeletes: 1, test: "1 failed pod (kill 1), 0 normal pod (create 0; will create in the next sync)"}, - {numFailedPods: 1, numNormalPods: 3, expectedCreates: 0, expectedDeletes: 3, test: "1 failed pod (kill 1), 3 normal pods (kill 2)"}, - {numFailedPods: 2, numNormalPods: 1, expectedCreates: 0, expectedDeletes: 2, test: "2 failed pods (kill 2), 1 normal pod"}, + {numFailedPods: 0, numNormalPods: 1, expectedCreates: 0, expectedDeletes: 0, expectedEvents: 0, test: "normal (do nothing)"}, + {numFailedPods: 0, numNormalPods: 0, expectedCreates: 1, expectedDeletes: 0, expectedEvents: 0, test: "no pods (create 1)"}, + {numFailedPods: 1, numNormalPods: 0, expectedCreates: 0, expectedDeletes: 1, expectedEvents: 1, test: "1 failed pod (kill 1), 0 normal pod (create 0; will create in the next sync)"}, + {numFailedPods: 1, numNormalPods: 3, expectedCreates: 0, expectedDeletes: 3, expectedEvents: 1, test: "1 failed pod (kill 1), 3 normal pods (kill 2)"}, + {numFailedPods: 2, numNormalPods: 1, expectedCreates: 0, expectedDeletes: 2, expectedEvents: 2, test: "2 failed pods (kill 2), 1 normal pod"}, } for _, test := range tests { @@ -1074,15 +1092,51 @@ func TestDaemonKillFailedPods(t *testing.T) { addNodes(manager.nodeStore, 0, 1, nil) addFailedPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, test.numFailedPods) addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, test.numNormalPods) - syncAndValidateDaemonSets(t, manager, ds, podControl, 
test.expectedCreates, test.expectedDeletes) + syncAndValidateDaemonSets(t, manager, ds, podControl, test.expectedCreates, test.expectedDeletes, test.expectedEvents) } } } -// DaemonSet should not launch a pod on a tainted node when the pod doesn't tolerate that taint. -func TestTaintedNodeDaemonDoesNotLaunchUntoleratePod(t *testing.T) { +// Daemonset should not remove a running pod from a node if the pod doesn't +// tolerate the nodes NoSchedule taint +func TestNoScheduleTaintedDoesntEvicitRunningIntolerantPod(t *testing.T) { for _, strategy := range updateStrategies() { - ds := newDaemonSet("untolerate") + ds := newDaemonSet("intolerant") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _ := newTestController(ds) + + node := newNode("tainted", nil) + manager.nodeStore.Add(node) + setNodeTaint(node, noScheduleTaints) + manager.podStore.Add(newPod("keep-running-me", "tainted", simpleDaemonSetLabel, ds)) + manager.dsStore.Add(ds) + + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) + } +} + +// Daemonset should remove a running pod from a node if the pod doesn't +// tolerate the nodes NoExecute taint +func TestNoExecuteTaintedDoesEvicitRunningIntolerantPod(t *testing.T) { + for _, strategy := range updateStrategies() { + ds := newDaemonSet("intolerant") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _ := newTestController(ds) + + node := newNode("tainted", nil) + manager.nodeStore.Add(node) + setNodeTaint(node, noExecuteTaints) + manager.podStore.Add(newPod("stop-running-me", "tainted", simpleDaemonSetLabel, ds)) + manager.dsStore.Add(ds) + + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 0) + } +} + +// DaemonSet should not launch a pod on a tainted node when the pod doesn't tolerate that taint. 
+func TestTaintedNodeDaemonDoesNotLaunchIntolerantPod(t *testing.T) { + for _, strategy := range updateStrategies() { + ds := newDaemonSet("intolerant") ds.Spec.UpdateStrategy = *strategy manager, podControl, _ := newTestController(ds) @@ -1091,7 +1145,7 @@ func TestTaintedNodeDaemonDoesNotLaunchUntoleratePod(t *testing.T) { manager.nodeStore.Add(node) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } @@ -1108,7 +1162,7 @@ func TestTaintedNodeDaemonLaunchesToleratePod(t *testing.T) { manager.nodeStore.Add(node) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } @@ -1127,7 +1181,7 @@ func TestNotReadyNodeDaemonLaunchesPod(t *testing.T) { manager.nodeStore.Add(node) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } @@ -1146,7 +1200,7 @@ func TestUnreachableNodeDaemonLaunchesPod(t *testing.T) { manager.nodeStore.Add(node) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } @@ -1162,7 +1216,7 @@ func TestNodeDaemonLaunchesToleratePod(t *testing.T) { manager.nodeStore.Add(node) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } @@ -1189,11 +1243,11 @@ func TestOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) { // Without enabling critical pod annotation feature gate, we shouldn't create critical pod utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=False") manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) // Enabling critical pod 
annotation feature gate should create critical pod utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True") - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } @@ -1217,11 +1271,25 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) { // Without enabling critical pod annotation feature gate, we shouldn't create critical pod utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=False") manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + switch strategy.Type { + case extensions.OnDeleteDaemonSetStrategyType: + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2) + case extensions.RollingUpdateDaemonSetStrategyType: + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3) + default: + t.Fatalf("unexpected UpdateStrategy %+v", strategy) + } // Enabling critical pod annotation feature gate should create critical pod utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True") - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) + switch strategy.Type { + case extensions.OnDeleteDaemonSetStrategyType: + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 2) + case extensions.RollingUpdateDaemonSetStrategyType: + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 3) + default: + t.Fatalf("unexpected UpdateStrategy %+v", strategy) + } } } @@ -1249,7 +1317,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) { ds.Spec.Template.Spec = podSpec setDaemonSetCritical(ds) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } @@ -1468,7 +1536,7 @@ func TestUpdateNode(t *testing.T) { manager.nodeStore.Add(c.oldNode) c.ds.Spec.UpdateStrategy = *strategy manager.dsStore.Add(c.ds) - syncAndValidateDaemonSets(t, manager, c.ds, 
podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 0) manager.enqueueDaemonSet = func(ds *extensions.DaemonSet) { if ds.Name == "ds" { diff --git a/pkg/controller/daemon/update_test.go b/pkg/controller/daemon/update_test.go index 1b90829c377..452b6459699 100644 --- a/pkg/controller/daemon/update_test.go +++ b/pkg/controller/daemon/update_test.go @@ -30,7 +30,7 @@ func TestDaemonSetUpdatesPods(t *testing.T) { maxUnavailable := 2 addNodes(manager.nodeStore, 0, 5, nil) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0) markPodsReady(podControl.podStore) ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2" @@ -41,25 +41,25 @@ func TestDaemonSetUpdatesPods(t *testing.T) { manager.dsStore.Update(ds) clearExpectations(t, manager, ds, podControl) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0) clearExpectations(t, manager, ds, podControl) - syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0) markPodsReady(podControl.podStore) clearExpectations(t, manager, ds, podControl) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0) clearExpectations(t, manager, ds, podControl) - syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0) markPodsReady(podControl.podStore) clearExpectations(t, manager, ds, podControl) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 0) clearExpectations(t, manager, ds, podControl) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) + 
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) markPodsReady(podControl.podStore) clearExpectations(t, manager, ds, podControl) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) clearExpectations(t, manager, ds, podControl) } @@ -69,7 +69,7 @@ func TestDaemonSetUpdatesWhenNewPosIsNotReady(t *testing.T) { maxUnavailable := 3 addNodes(manager.nodeStore, 0, 5, nil) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0) markPodsReady(podControl.podStore) ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2" @@ -81,12 +81,12 @@ func TestDaemonSetUpdatesWhenNewPosIsNotReady(t *testing.T) { // new pods are not ready numUnavailable == maxUnavailable clearExpectations(t, manager, ds, podControl) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0) clearExpectations(t, manager, ds, podControl) - syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0) clearExpectations(t, manager, ds, podControl) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) clearExpectations(t, manager, ds, podControl) } @@ -96,7 +96,7 @@ func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) { maxUnavailable := 3 addNodes(manager.nodeStore, 0, 5, nil) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0) ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2" ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType @@ -107,12 +107,12 @@ func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) { // all old pods are unavailable so 
should be removed clearExpectations(t, manager, ds, podControl) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 5) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 5, 0) clearExpectations(t, manager, ds, podControl) - syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0) clearExpectations(t, manager, ds, podControl) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) clearExpectations(t, manager, ds, podControl) } @@ -122,7 +122,7 @@ func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) { maxUnavailable := 3 addNodes(manager.nodeStore, 0, 5, nil) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0) ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType intStr := intstr.FromInt(maxUnavailable) @@ -131,7 +131,7 @@ func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) { // template is not changed no pod should be removed clearExpectations(t, manager, ds, podControl) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) clearExpectations(t, manager, ds, podControl) } diff --git a/pkg/controller/deployment/deployment_controller.go b/pkg/controller/deployment/deployment_controller.go index b9e05788f53..0156afd7326 100644 --- a/pkg/controller/deployment/deployment_controller.go +++ b/pkg/controller/deployment/deployment_controller.go @@ -356,7 +356,7 @@ func (dc *DeploymentController) deletePod(obj interface{}) { glog.V(4).Infof("Pod %s deleted.", pod.Name) if d := dc.getDeploymentForPod(pod); d != nil && d.Spec.Strategy.Type == extensions.RecreateDeploymentStrategyType { // Sync if this Deployment now has no more Pods. 
- rsList, err := dc.getReplicaSetsForDeployment(d) + rsList, err := util.ListReplicaSets(d, util.RsListFromClient(dc.client)) if err != nil { return } @@ -614,25 +614,6 @@ func (dc *DeploymentController) syncDeployment(key string) error { return dc.syncStatusOnly(d, rsList, podMap) } - // Why run the cleanup policy only when there is no rollback request? - // The thing with the cleanup policy currently is that it is far from smart because it takes into account - // the latest replica sets while it should instead retain the latest *working* replica sets. This means that - // you can have a cleanup policy of 1 but your last known working replica set may be 2 or 3 versions back - // in the history. - // Eventually we will want to find a way to recognize replica sets that have worked at some point in time - // (and chances are higher that they will work again as opposed to others that didn't) for candidates to - // automatically roll back to (#23211) and the cleanup policy should help. - if d.Spec.RollbackTo == nil { - _, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false) - if err != nil { - return err - } - // So far the cleanup policy was executed once a deployment was paused, scaled up/down, or it - // successfully completed deploying a replica set. Decouple it from the strategies and have it - // run almost unconditionally - cleanupDeployment is safe by default. - dc.cleanupDeployment(oldRSs, d) - } - // Update deployment conditions with an Unknown condition when pausing/resuming // a deployment. In this way, we can be sure that we won't timeout when a user // resumes a Deployment with a set progressDeadlineSeconds. 
diff --git a/pkg/controller/deployment/deployment_controller_test.go b/pkg/controller/deployment/deployment_controller_test.go index 837aeba9662..3f213e1b909 100644 --- a/pkg/controller/deployment/deployment_controller_test.go +++ b/pkg/controller/deployment/deployment_controller_test.go @@ -389,11 +389,81 @@ func TestPodDeletionDoesntEnqueueRecreateDeployment(t *testing.T) { foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType - rs := newReplicaSet(foo, "foo-1", 1) - pod := generatePodFromRS(rs) + rs1 := newReplicaSet(foo, "foo-1", 1) + rs2 := newReplicaSet(foo, "foo-1", 1) + pod1 := generatePodFromRS(rs1) + pod2 := generatePodFromRS(rs2) f.dLister = append(f.dLister, foo) - f.rsLister = append(f.rsLister, rs) + // Let's pretend this is a different pod. The gist is that the pod lister needs to + // return a non-empty list. + f.podLister = append(f.podLister, pod1, pod2) + + c, _ := f.newController() + enqueued := false + c.enqueueDeployment = func(d *extensions.Deployment) { + if d.Name == "foo" { + enqueued = true + } + } + + c.deletePod(pod1) + + if enqueued { + t.Errorf("expected deployment %q not to be queued after pod deletion", foo.Name) + } +} + +// TestPodDeletionPartialReplicaSetOwnershipEnqueueRecreateDeployment ensures that +// the deletion of a pod will requeue a Recreate deployment iff there is no other +// pod returned from the client in the case where a deployment has multiple replica +// sets, some of which have empty owner references. 
+func TestPodDeletionPartialReplicaSetOwnershipEnqueueRecreateDeployment(t *testing.T) { + f := newFixture(t) + + foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) + foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType + rs1 := newReplicaSet(foo, "foo-1", 1) + rs2 := newReplicaSet(foo, "foo-2", 2) + rs2.OwnerReferences = nil + pod := generatePodFromRS(rs1) + + f.dLister = append(f.dLister, foo) + f.rsLister = append(f.rsLister, rs1, rs2) + f.objects = append(f.objects, foo, rs1, rs2) + + c, _ := f.newController() + enqueued := false + c.enqueueDeployment = func(d *extensions.Deployment) { + if d.Name == "foo" { + enqueued = true + } + } + + c.deletePod(pod) + + if !enqueued { + t.Errorf("expected deployment %q to be queued after pod deletion", foo.Name) + } +} + +// TestPodDeletionPartialReplicaSetOwnershipDoesntEnqueueRecreateDeployment that the +// deletion of a pod will not requeue a Recreate deployment iff there are other pods +// returned from the client in the case where a deployment has multiple replica sets, +// some of which have empty owner references. +func TestPodDeletionPartialReplicaSetOwnershipDoesntEnqueueRecreateDeployment(t *testing.T) { + f := newFixture(t) + + foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) + foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType + rs1 := newReplicaSet(foo, "foo-1", 1) + rs2 := newReplicaSet(foo, "foo-2", 2) + rs2.OwnerReferences = nil + pod := generatePodFromRS(rs1) + + f.dLister = append(f.dLister, foo) + f.rsLister = append(f.rsLister, rs1, rs2) + f.objects = append(f.objects, foo, rs1, rs2) // Let's pretend this is a different pod. The gist is that the pod lister needs to // return a non-empty list. 
f.podLister = append(f.podLister, pod) diff --git a/pkg/controller/deployment/recreate.go b/pkg/controller/deployment/recreate.go index b0f438fc6f5..4d2f2337d7a 100644 --- a/pkg/controller/deployment/recreate.go +++ b/pkg/controller/deployment/recreate.go @@ -63,6 +63,12 @@ func (dc *DeploymentController) rolloutRecreate(d *extensions.Deployment, rsList return err } + if util.DeploymentComplete(d, &d.Status) { + if err := dc.cleanupDeployment(oldRSs, d); err != nil { + return err + } + } + // Sync deployment status. return dc.syncRolloutStatus(allRSs, newRS, d) } diff --git a/pkg/controller/deployment/rolling.go b/pkg/controller/deployment/rolling.go index 3c1405a68f4..598928366d7 100644 --- a/pkg/controller/deployment/rolling.go +++ b/pkg/controller/deployment/rolling.go @@ -57,6 +57,12 @@ func (dc *DeploymentController) rolloutRolling(d *extensions.Deployment, rsList return dc.syncRolloutStatus(allRSs, newRS, d) } + if deploymentutil.DeploymentComplete(d, &d.Status) { + if err := dc.cleanupDeployment(oldRSs, d); err != nil { + return err + } + } + // Sync deployment status return dc.syncRolloutStatus(allRSs, newRS, d) } diff --git a/pkg/controller/deployment/sync.go b/pkg/controller/deployment/sync.go index 8b0408bb433..88b7395a0f5 100644 --- a/pkg/controller/deployment/sync.go +++ b/pkg/controller/deployment/sync.go @@ -59,6 +59,13 @@ func (dc *DeploymentController) sync(d *extensions.Deployment, rsList []*extensi return err } + // Clean up the deployment when it's paused and no rollback is in flight. 
+ if d.Spec.Paused && d.Spec.RollbackTo == nil { + if err := dc.cleanupDeployment(oldRSs, d); err != nil { + return err + } + } + allRSs := append(oldRSs, newRS) return dc.syncDeploymentStatus(allRSs, newRS, d) } @@ -552,7 +559,6 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSe sort.Sort(controller.ReplicaSetsByCreationTimestamp(cleanableRSes)) glog.V(4).Infof("Looking to cleanup old replica sets for deployment %q", deployment.Name) - var errList []error for i := int32(0); i < diff; i++ { rs := cleanableRSes[i] // Avoid delete replica set with non-zero replica counts @@ -561,12 +567,13 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSe } glog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name) if err := dc.client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) { - glog.V(2).Infof("Failed deleting old replica set %v for deployment %v: %v", rs.Name, deployment.Name, err) - errList = append(errList, err) + // Return error instead of aggregating and continuing DELETEs on the theory + // that we may be overloading the api server. + return err } } - return utilerrors.NewAggregate(errList) + return nil } // syncDeploymentStatus checks if the status is up-to-date and sync it if necessary diff --git a/pkg/controller/deployment/util/deployment_util.go b/pkg/controller/deployment/util/deployment_util.go index 578b409f210..eecde6c60c7 100644 --- a/pkg/controller/deployment/util/deployment_util.go +++ b/pkg/controller/deployment/util/deployment_util.go @@ -483,7 +483,7 @@ func getReplicaSetFraction(rs extensions.ReplicaSet, d extensions.Deployment) in // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. // The third returned value is the new replica set, and it may be nil if it doesn't exist yet. 
func GetAllReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, *extensions.ReplicaSet, error) { - rsList, err := ListReplicaSets(deployment, rsListFromClient(c)) + rsList, err := ListReplicaSets(deployment, RsListFromClient(c)) if err != nil { return nil, nil, nil, err } @@ -501,7 +501,7 @@ func GetAllReplicaSets(deployment *extensions.Deployment, c clientset.Interface) // GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and ReplicaSetList from client interface. // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. func GetOldReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { - rsList, err := ListReplicaSets(deployment, rsListFromClient(c)) + rsList, err := ListReplicaSets(deployment, RsListFromClient(c)) if err != nil { return nil, nil, err } @@ -511,15 +511,15 @@ func GetOldReplicaSets(deployment *extensions.Deployment, c clientset.Interface) // GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface. // Returns nil if the new replica set doesn't exist yet. func GetNewReplicaSet(deployment *extensions.Deployment, c clientset.Interface) (*extensions.ReplicaSet, error) { - rsList, err := ListReplicaSets(deployment, rsListFromClient(c)) + rsList, err := ListReplicaSets(deployment, RsListFromClient(c)) if err != nil { return nil, err } return FindNewReplicaSet(deployment, rsList) } -// rsListFromClient returns an rsListFunc that wraps the given client. -func rsListFromClient(c clientset.Interface) rsListFunc { +// RsListFromClient returns an rsListFunc that wraps the given client. 
+func RsListFromClient(c clientset.Interface) RsListFunc { return func(namespace string, options metav1.ListOptions) ([]*extensions.ReplicaSet, error) { rsList, err := c.Extensions().ReplicaSets(namespace).List(options) if err != nil { @@ -541,14 +541,14 @@ func podListFromClient(c clientset.Interface) podListFunc { } // TODO: switch this to full namespacers -type rsListFunc func(string, metav1.ListOptions) ([]*extensions.ReplicaSet, error) +type RsListFunc func(string, metav1.ListOptions) ([]*extensions.ReplicaSet, error) type podListFunc func(string, metav1.ListOptions) (*v1.PodList, error) // ListReplicaSets returns a slice of RSes the given deployment targets. // Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan), // because only the controller itself should do that. // However, it does filter out anything whose ControllerRef doesn't match. -func ListReplicaSets(deployment *extensions.Deployment, getRSList rsListFunc) ([]*extensions.ReplicaSet, error) { +func ListReplicaSets(deployment *extensions.Deployment, getRSList RsListFunc) ([]*extensions.ReplicaSet, error) { // TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector // should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830. 
namespace := deployment.Namespace diff --git a/pkg/controller/endpoint/BUILD b/pkg/controller/endpoint/BUILD index 73c88318711..b56e9d96020 100644 --- a/pkg/controller/endpoint/BUILD +++ b/pkg/controller/endpoint/BUILD @@ -21,7 +21,6 @@ go_library( "//pkg/api/v1/pod:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library", - "//pkg/client/leaderelection/resourcelock:go_default_library", "//pkg/client/listers/core/v1:go_default_library", "//pkg/controller:go_default_library", "//pkg/util/metrics:go_default_library", @@ -34,6 +33,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library", "//vendor/k8s.io/client-go/util/workqueue:go_default_library", ], ) diff --git a/pkg/controller/endpoint/OWNERS b/pkg/controller/endpoint/OWNERS index dffc5525e78..3bb8ef23f46 100755 --- a/pkg/controller/endpoint/OWNERS +++ b/pkg/controller/endpoint/OWNERS @@ -1,5 +1,4 @@ reviewers: -- bprashanth - bowei - MrHohn - thockin diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go index f13b80f8de0..00ff8193de8 100644 --- a/pkg/controller/endpoint/endpoints_controller.go +++ b/pkg/controller/endpoint/endpoints_controller.go @@ -30,13 +30,13 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1/endpoints" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" coreinformers 
"k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1" - "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock" corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/util/metrics" @@ -354,8 +354,6 @@ func (e *EndpointController) syncService(key string) error { return err } - subsets := []v1.EndpointSubset{} - var tolerateUnreadyEndpoints bool if v, ok := service.Annotations[TolerateUnreadyEndpointsAnnotation]; ok { b, err := strconv.ParseBool(v) @@ -366,61 +364,59 @@ func (e *EndpointController) syncService(key string) error { } } - readyEps := 0 - notReadyEps := 0 + subsets := []v1.EndpointSubset{} + var totalReadyEps int = 0 + var totalNotReadyEps int = 0 + for _, pod := range pods { + if len(pod.Status.PodIP) == 0 { + glog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name) + continue + } + if !tolerateUnreadyEndpoints && pod.DeletionTimestamp != nil { + glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name) + continue + } - for i := range service.Spec.Ports { - servicePort := &service.Spec.Ports[i] + epa := v1.EndpointAddress{ + IP: pod.Status.PodIP, + NodeName: &pod.Spec.NodeName, + TargetRef: &v1.ObjectReference{ + Kind: "Pod", + Namespace: pod.ObjectMeta.Namespace, + Name: pod.ObjectMeta.Name, + UID: pod.ObjectMeta.UID, + ResourceVersion: pod.ObjectMeta.ResourceVersion, + }} - portName := servicePort.Name - portProto := servicePort.Protocol - portNum, err := podutil.FindPort(pod, servicePort) - if err != nil { - glog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err) - continue - } - if len(pod.Status.PodIP) == 0 { - glog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name) - continue - } - if !tolerateUnreadyEndpoints && pod.DeletionTimestamp != nil { - glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name) - continue + hostname := 
pod.Spec.Hostname + if len(hostname) > 0 && pod.Spec.Subdomain == service.Name && service.Namespace == pod.Namespace { + epa.Hostname = hostname + } + + // Allow headless service not to have ports. + if len(service.Spec.Ports) == 0 { + if service.Spec.ClusterIP == api.ClusterIPNone { + epp := v1.EndpointPort{Port: 0, Protocol: v1.ProtocolTCP} + subsets, totalReadyEps, totalNotReadyEps = addEndpointSubset(subsets, pod, epa, epp, tolerateUnreadyEndpoints) } + } else { + for i := range service.Spec.Ports { + servicePort := &service.Spec.Ports[i] - epp := v1.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto} - epa := v1.EndpointAddress{ - IP: pod.Status.PodIP, - NodeName: &pod.Spec.NodeName, - TargetRef: &v1.ObjectReference{ - Kind: "Pod", - Namespace: pod.ObjectMeta.Namespace, - Name: pod.ObjectMeta.Name, - UID: pod.ObjectMeta.UID, - ResourceVersion: pod.ObjectMeta.ResourceVersion, - }} + portName := servicePort.Name + portProto := servicePort.Protocol + portNum, err := podutil.FindPort(pod, servicePort) + if err != nil { + glog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err) + continue + } - hostname := pod.Spec.Hostname - if len(hostname) > 0 && - pod.Spec.Subdomain == service.Name && - service.Namespace == pod.Namespace { - epa.Hostname = hostname - } - - if tolerateUnreadyEndpoints || podutil.IsPodReady(pod) { - subsets = append(subsets, v1.EndpointSubset{ - Addresses: []v1.EndpointAddress{epa}, - Ports: []v1.EndpointPort{epp}, - }) - readyEps++ - } else { - glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name) - subsets = append(subsets, v1.EndpointSubset{ - NotReadyAddresses: []v1.EndpointAddress{epa}, - Ports: []v1.EndpointPort{epp}, - }) - notReadyEps++ + var readyEps, notReadyEps int + epp := v1.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto} + subsets, readyEps, notReadyEps = addEndpointSubset(subsets, pod, epa, epp, tolerateUnreadyEndpoints) + 
totalReadyEps = totalReadyEps + readyEps + totalNotReadyEps = totalNotReadyEps + notReadyEps } } } @@ -457,7 +453,7 @@ func (e *EndpointController) syncService(key string) error { newEndpoints.Annotations = make(map[string]string) } - glog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, readyEps, notReadyEps) + glog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps) createEndpoints := len(currentEndpoints.ResourceVersion) == 0 if createEndpoints { // No previous endpoints, create them @@ -508,3 +504,35 @@ func (e *EndpointController) checkLeftoverEndpoints() { e.queue.Add(key) } } + +func addEndpointSubset(subsets []v1.EndpointSubset, pod *v1.Pod, epa v1.EndpointAddress, + epp v1.EndpointPort, tolerateUnreadyEndpoints bool) ([]v1.EndpointSubset, int, int) { + var readyEps int = 0 + var notReadyEps int = 0 + if tolerateUnreadyEndpoints || podutil.IsPodReady(pod) { + subsets = append(subsets, v1.EndpointSubset{ + Addresses: []v1.EndpointAddress{epa}, + Ports: []v1.EndpointPort{epp}, + }) + readyEps++ + } else if shouldPodBeInEndpoints(pod) { + glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name) + subsets = append(subsets, v1.EndpointSubset{ + NotReadyAddresses: []v1.EndpointAddress{epa}, + Ports: []v1.EndpointPort{epp}, + }) + notReadyEps++ + } + return subsets, readyEps, notReadyEps +} + +func shouldPodBeInEndpoints(pod *v1.Pod) bool { + switch pod.Spec.RestartPolicy { + case v1.RestartPolicyNever: + return pod.Status.Phase != v1.PodFailed && pod.Status.Phase != v1.PodSucceeded + case v1.RestartPolicyOnFailure: + return pod.Status.Phase != v1.PodSucceeded + default: + return true + } +} diff --git a/pkg/controller/endpoint/endpoints_controller_test.go b/pkg/controller/endpoint/endpoints_controller_test.go index bd3a81a60bf..89a181557a6 100644 --- a/pkg/controller/endpoint/endpoints_controller_test.go +++ 
b/pkg/controller/endpoint/endpoints_controller_test.go @@ -76,6 +76,38 @@ func addPods(store cache.Store, namespace string, nPods int, nPorts int, nNotRea } } +func addNotReadyPodsWithSpecifiedRestartPolicyAndPhase(store cache.Store, namespace string, nPods int, nPorts int, restartPolicy v1.RestartPolicy, podPhase v1.PodPhase) { + for i := 0; i < nPods; i++ { + p := &v1.Pod{ + TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: fmt.Sprintf("pod%d", i), + Labels: map[string]string{"foo": "bar"}, + }, + Spec: v1.PodSpec{ + RestartPolicy: restartPolicy, + Containers: []v1.Container{{Ports: []v1.ContainerPort{}}}, + }, + Status: v1.PodStatus{ + PodIP: fmt.Sprintf("1.2.3.%d", 4+i), + Phase: podPhase, + Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionFalse, + }, + }, + }, + } + for j := 0; j < nPorts; j++ { + p.Spec.Containers[0].Ports = append(p.Spec.Containers[0].Ports, + v1.ContainerPort{Name: fmt.Sprintf("port%d", i), ContainerPort: int32(8080 + j)}) + } + store.Add(p) + } +} + type serverResponse struct { statusCode int obj interface{} @@ -620,3 +652,243 @@ func TestWaitsForAllInformersToBeSynced2(t *testing.T) { }() } } + +func TestSyncEndpointsHeadlessService(t *testing.T) { + ns := "headless" + testServer, endpointsHandler := makeTestServer(t, ns) + defer testServer.Close() + endpoints := newController(testServer.URL) + endpoints.endpointsStore.Add(&v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}}, + Ports: []v1.EndpointPort{{Port: 1000, Protocol: "TCP"}}, + }}, + }) + addPods(endpoints.podStore, ns, 1, 1, 0) + endpoints.serviceStore.Add(&v1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: v1.ServiceSpec{ + Selector: 
map[string]string{}, + ClusterIP: api.ClusterIPNone, + Ports: []v1.ServicePort{}, + }, + }) + endpoints.syncService(ns + "/foo") + data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, + Ports: []v1.EndpointPort{{Port: 0, Protocol: "TCP"}}, + }}, + }) + endpointsHandler.ValidateRequestCount(t, 1) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) +} + +func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyNeverAndPhaseFailed(t *testing.T) { + ns := "other" + testServer, endpointsHandler := makeTestServer(t, ns) + defer testServer.Close() + endpoints := newController(testServer.URL) + endpoints.endpointsStore.Add(&v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + Labels: map[string]string{ + "foo": "bar", + }, + }, + Subsets: []v1.EndpointSubset{}, + }) + addNotReadyPodsWithSpecifiedRestartPolicyAndPhase(endpoints.podStore, ns, 1, 1, v1.RestartPolicyNever, v1.PodFailed) + endpoints.serviceStore.Add(&v1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: v1.ServiceSpec{ + Selector: map[string]string{"foo": "bar"}, + Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}}, + }, + }) + endpoints.syncService(ns + "/foo") + data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []v1.EndpointSubset{}, + }) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) +} + +func 
TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyNeverAndPhaseSucceeded(t *testing.T) { + ns := "other" + testServer, endpointsHandler := makeTestServer(t, ns) + defer testServer.Close() + endpoints := newController(testServer.URL) + endpoints.endpointsStore.Add(&v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + Labels: map[string]string{ + "foo": "bar", + }, + }, + Subsets: []v1.EndpointSubset{}, + }) + addNotReadyPodsWithSpecifiedRestartPolicyAndPhase(endpoints.podStore, ns, 1, 1, v1.RestartPolicyNever, v1.PodSucceeded) + endpoints.serviceStore.Add(&v1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: v1.ServiceSpec{ + Selector: map[string]string{"foo": "bar"}, + Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}}, + }, + }) + endpoints.syncService(ns + "/foo") + data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []v1.EndpointSubset{}, + }) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) +} + +func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyOnFailureAndPhaseSucceeded(t *testing.T) { + ns := "other" + testServer, endpointsHandler := makeTestServer(t, ns) + defer testServer.Close() + endpoints := newController(testServer.URL) + endpoints.endpointsStore.Add(&v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + Labels: map[string]string{ + "foo": "bar", + }, + }, + Subsets: []v1.EndpointSubset{}, + }) + addNotReadyPodsWithSpecifiedRestartPolicyAndPhase(endpoints.podStore, ns, 1, 1, v1.RestartPolicyOnFailure, v1.PodSucceeded) + endpoints.serviceStore.Add(&v1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: v1.ServiceSpec{ + Selector: map[string]string{"foo": "bar"}, + 
Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}}, + }, + }) + endpoints.syncService(ns + "/foo") + data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []v1.EndpointSubset{}, + }) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) +} + +// There are 3*5 possibilities(3 types of RestartPolicy by 5 types of PodPhase). Not list them all here. +// Just list all of the 3 false cases and 3 of the 12 true cases. +func TestShouldPodBeInEndpoints(t *testing.T) { + testCases := []struct { + name string + pod *v1.Pod + expected bool + }{ + // Pod should not be in endpoints cases: + { + name: "Failed pod with Never RestartPolicy", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + RestartPolicy: v1.RestartPolicyNever, + }, + Status: v1.PodStatus{ + Phase: v1.PodFailed, + }, + }, + expected: false, + }, + { + name: "Succeeded pod with Never RestartPolicy", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + RestartPolicy: v1.RestartPolicyNever, + }, + Status: v1.PodStatus{ + Phase: v1.PodSucceeded, + }, + }, + expected: false, + }, + { + name: "Succeeded pod with OnFailure RestartPolicy", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + RestartPolicy: v1.RestartPolicyOnFailure, + }, + Status: v1.PodStatus{ + Phase: v1.PodSucceeded, + }, + }, + expected: false, + }, + // Pod should be in endpoints cases: + { + name: "Failed pod with Always RestartPolicy", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + RestartPolicy: v1.RestartPolicyAlways, + }, + Status: v1.PodStatus{ + Phase: v1.PodFailed, + }, + }, + expected: true, + }, + { + name: "Pending pod with Never RestartPolicy", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + RestartPolicy: v1.RestartPolicyNever, + }, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, + }, + expected: true, + }, + { + name: "Unknown pod with OnFailure RestartPolicy", + pod: &v1.Pod{ + Spec: 
v1.PodSpec{ + RestartPolicy: v1.RestartPolicyOnFailure, + }, + Status: v1.PodStatus{ + Phase: v1.PodUnknown, + }, + }, + expected: true, + }, + } + for _, test := range testCases { + result := shouldPodBeInEndpoints(test.pod) + if result != test.expected { + t.Errorf("%s: expected : %t, got: %t", test.name, test.expected, result) + } + } +} diff --git a/pkg/controller/garbagecollector/BUILD b/pkg/controller/garbagecollector/BUILD index 58901c622b3..b6e88d24c4a 100644 --- a/pkg/controller/garbagecollector/BUILD +++ b/pkg/controller/garbagecollector/BUILD @@ -15,7 +15,6 @@ go_library( "garbagecollector.go", "graph.go", "graph_builder.go", - "metrics.go", "operations.go", "patch.go", "rate_limiter_helper.go", @@ -31,7 +30,6 @@ go_library( "//pkg/util/workqueue/prometheus:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/groupcache/lru:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -39,7 +37,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/pkg/controller/garbagecollector/OWNERS b/pkg/controller/garbagecollector/OWNERS index c1499f1fad5..7d9bdbd4ab6 100755 --- a/pkg/controller/garbagecollector/OWNERS +++ b/pkg/controller/garbagecollector/OWNERS @@ -1,3 +1,7 @@ +approvers: +- caesarxuchao +- lavalamp +- deads2k reviewers: - 
caesarxuchao - lavalamp diff --git a/pkg/controller/garbagecollector/garbagecollector.go b/pkg/controller/garbagecollector/garbagecollector.go index 785bcf3792f..0cd52024961 100644 --- a/pkg/controller/garbagecollector/garbagecollector.go +++ b/pkg/controller/garbagecollector/garbagecollector.go @@ -137,8 +137,6 @@ func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) { go wait.Until(gc.runAttemptToOrphanWorker, 1*time.Second, stopCh) } - Register() - <-stopCh } diff --git a/pkg/controller/garbagecollector/garbagecollector_test.go b/pkg/controller/garbagecollector/garbagecollector_test.go index bfba3a314d4..994db10136e 100644 --- a/pkg/controller/garbagecollector/garbagecollector_test.go +++ b/pkg/controller/garbagecollector/garbagecollector_test.go @@ -272,7 +272,7 @@ func TestProcessEvent(t *testing.T) { var testScenarios = []struct { name string // a series of events that will be supplied to the - // GraphBuilder.eventQueue. + // GraphBuilder.graphChanges. events []event }{ { diff --git a/pkg/controller/garbagecollector/graph.go b/pkg/controller/garbagecollector/graph.go index 59b36c2ebfa..5ced32b7a97 100644 --- a/pkg/controller/garbagecollector/graph.go +++ b/pkg/controller/garbagecollector/graph.go @@ -34,8 +34,8 @@ func (s objectReference) String() string { return fmt.Sprintf("[%s/%s, namespace: %s, name: %s, uid: %s]", s.APIVersion, s.Kind, s.Namespace, s.Name, s.UID) } -// The single-threaded GraphBuilder.processEvent() is the sole writer of the -// nodes. The multi-threaded GarbageCollector.processItem() reads the nodes. +// The single-threaded GraphBuilder.processGraphChanges() is the sole writer of the +// nodes. The multi-threaded GarbageCollector.attemptToDeleteItem() reads the nodes. // WARNING: node has different locks on different fields. setters and getters // use the respective locks, so the return values of the getters can be // inconsistent. 
@@ -46,7 +46,7 @@ type node struct { // dependents are the nodes that have node.identity as a // metadata.ownerReference. dependents map[*node]struct{} - // this is set by processEvent() if the object has non-nil DeletionTimestamp + // this is set by processGraphChanges() if the object has non-nil DeletionTimestamp // and has the FinalizerDeleteDependents. deletingDependents bool deletingDependentsLock sync.RWMutex diff --git a/pkg/controller/garbagecollector/graph_builder.go b/pkg/controller/garbagecollector/graph_builder.go index 671db1e7c66..4f356667b4e 100644 --- a/pkg/controller/garbagecollector/graph_builder.go +++ b/pkg/controller/garbagecollector/graph_builder.go @@ -259,7 +259,7 @@ func (gb *GraphBuilder) enqueueChanges(e *event) { // addDependentToOwners adds n to owners' dependents list. If the owner does not // exist in the gb.uidToNode yet, a "virtual" node will be created to represent // the owner. The "virtual" node will be enqueued to the attemptToDelete, so that -// processItem() will verify if the owner exists according to the API server. +// attemptToDeleteItem() will verify if the owner exists according to the API server. 
func (gb *GraphBuilder) addDependentToOwners(n *node, owners []metav1.OwnerReference) { for _, owner := range owners { ownerNode, ok := gb.uidToNode.Read(owner.UID) diff --git a/pkg/controller/garbagecollector/metaonly/metaonly.go b/pkg/controller/garbagecollector/metaonly/metaonly.go index e9f2872ccf9..5f1db87d397 100644 --- a/pkg/controller/garbagecollector/metaonly/metaonly.go +++ b/pkg/controller/garbagecollector/metaonly/metaonly.go @@ -27,9 +27,6 @@ import ( "k8s.io/kubernetes/pkg/api" ) -func (obj *MetadataOnlyObject) GetObjectKind() schema.ObjectKind { return obj } -func (obj *MetadataOnlyObjectList) GetObjectKind() schema.ObjectKind { return obj } - type metaOnlyJSONScheme struct{} // This function can be extended to mapping different gvk to different MetadataOnlyObject, diff --git a/pkg/controller/garbagecollector/metrics.go b/pkg/controller/garbagecollector/metrics.go deleted file mode 100644 index 21eeafa4cb7..00000000000 --- a/pkg/controller/garbagecollector/metrics.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package garbagecollector - -import ( - "sync" - "time" - - "k8s.io/apimachinery/pkg/util/clock" - - "github.com/prometheus/client_golang/prometheus" -) - -const ( - GarbageCollectSubsystem = "garbage_collector" - EventProcessingLatencyKey = "event_processing_latency_microseconds" - DirtyProcessingLatencyKey = "dirty_processing_latency_microseconds" - OrphanProcessingLatencyKey = "orphan_processing_latency_microseconds" -) - -var ( - EventProcessingLatency = prometheus.NewSummary( - prometheus.SummaryOpts{ - Subsystem: GarbageCollectSubsystem, - Name: EventProcessingLatencyKey, - Help: "Time in microseconds of an event spend in the eventQueue", - }, - ) - DirtyProcessingLatency = prometheus.NewSummary( - prometheus.SummaryOpts{ - Subsystem: GarbageCollectSubsystem, - Name: DirtyProcessingLatencyKey, - Help: "Time in microseconds of an item spend in the dirtyQueue", - }, - ) - OrphanProcessingLatency = prometheus.NewSummary( - prometheus.SummaryOpts{ - Subsystem: GarbageCollectSubsystem, - Name: OrphanProcessingLatencyKey, - Help: "Time in microseconds of an item spend in the orphanQueue", - }, - ) -) - -var registerMetrics sync.Once - -// Register all metrics. -func Register() { - // Register the metrics. 
- registerMetrics.Do(func() { - prometheus.MustRegister(EventProcessingLatency) - prometheus.MustRegister(DirtyProcessingLatency) - prometheus.MustRegister(OrphanProcessingLatency) - }) -} - -func sinceInMicroseconds(clock clock.Clock, start time.Time) float64 { - return float64(clock.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) -} diff --git a/pkg/controller/garbagecollector/operations.go b/pkg/controller/garbagecollector/operations.go index b488fb8d1a2..fcfdcd1cee6 100644 --- a/pkg/controller/garbagecollector/operations.go +++ b/pkg/controller/garbagecollector/operations.go @@ -115,7 +115,7 @@ func (gc *GarbageCollector) removeFinalizer(owner *node, targetFinalizer string) for _, f := range finalizers { if f == targetFinalizer { found = true - break + continue } newFinalizers = append(newFinalizers, f) } diff --git a/pkg/controller/history/OWNERS b/pkg/controller/history/OWNERS index 4ff17cf2c72..389a4766b63 100755 --- a/pkg/controller/history/OWNERS +++ b/pkg/controller/history/OWNERS @@ -1,5 +1,4 @@ approvers: -- bprashanth - enisoc - foxish - janetkuo @@ -7,7 +6,6 @@ approvers: - kow3ns - smarterclayton reviewers: -- bprashanth - enisoc - foxish - janetkuo diff --git a/pkg/controller/node/nodecontroller.go b/pkg/controller/node/nodecontroller.go index 84fbd498709..7264dc79806 100644 --- a/pkg/controller/node/nodecontroller.go +++ b/pkg/controller/node/nodecontroller.go @@ -41,7 +41,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/api" v1helper "k8s.io/kubernetes/pkg/api/v1/helper" - nodeutil "k8s.io/kubernetes/pkg/api/v1/node" + v1node "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1" extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/extensions/v1beta1" @@ -453,7 +453,7 @@ func (nc *NodeController) doTaintingPass() { zone := 
utilnode.GetZoneKey(node) EvictionsNumber.WithLabelValues(zone).Inc() } - _, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) + _, condition := v1node.GetNodeCondition(&node.Status, v1.NodeReady) // Because we want to mimic NodeStatus.Condition["Ready"] we make "unreachable" and "not ready" taints mutually exclusive. taintToAdd := v1.Taint{} oppositeTaint := v1.Taint{} @@ -510,6 +510,26 @@ func (nc *NodeController) Run(stopCh <-chan struct{}) { <-stopCh } +// addPodEvictorForNewZone checks if new zone appeared, and if so add new evictor. +func (nc *NodeController) addPodEvictorForNewZone(node *v1.Node) { + zone := utilnode.GetZoneKey(node) + if _, found := nc.zoneStates[zone]; !found { + nc.zoneStates[zone] = stateInitial + if !nc.useTaintBasedEvictions { + nc.zonePodEvictor[zone] = + NewRateLimitedTimedQueue( + flowcontrol.NewTokenBucketRateLimiter(nc.evictionLimiterQPS, evictionRateLimiterBurst)) + } else { + nc.zoneNotReadyOrUnreachableTainer[zone] = + NewRateLimitedTimedQueue( + flowcontrol.NewTokenBucketRateLimiter(nc.evictionLimiterQPS, evictionRateLimiterBurst)) + } + // Init the metric for the new zone. + glog.Infof("Initializing eviction metric for zone: %v", zone) + EvictionsNumber.WithLabelValues(zone).Add(0) + } +} + // monitorNodeStatus verifies node status are constantly updated by kubelet, and if not, // post "NodeReady==ConditionUnknown". It also evicts all pods if node is not ready or // not reachable for a long period of time. 
@@ -520,28 +540,17 @@ func (nc *NodeController) monitorNodeStatus() error { if err != nil { return err } - added, deleted := nc.checkForNodeAddedDeleted(nodes) + added, deleted, newZoneRepresentatives := nc.classifyNodes(nodes) + + for i := range newZoneRepresentatives { + nc.addPodEvictorForNewZone(newZoneRepresentatives[i]) + } + for i := range added { glog.V(1).Infof("NodeController observed a new Node: %#v", added[i].Name) recordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), v1.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in NodeController", added[i].Name)) nc.knownNodeSet[added[i].Name] = added[i] - // When adding new Nodes we need to check if new zone appeared, and if so add new evictor. - zone := utilnode.GetZoneKey(added[i]) - if _, found := nc.zoneStates[zone]; !found { - nc.zoneStates[zone] = stateInitial - if !nc.useTaintBasedEvictions { - nc.zonePodEvictor[zone] = - NewRateLimitedTimedQueue( - flowcontrol.NewTokenBucketRateLimiter(nc.evictionLimiterQPS, evictionRateLimiterBurst)) - } else { - nc.zoneNotReadyOrUnreachableTainer[zone] = - NewRateLimitedTimedQueue( - flowcontrol.NewTokenBucketRateLimiter(nc.evictionLimiterQPS, evictionRateLimiterBurst)) - } - // Init the metric for the new zone. - glog.Infof("Initializing eviction metric for zone: %v", zone) - EvictionsNumber.WithLabelValues(zone).Add(0) - } + nc.addPodEvictorForNewZone(added[i]) if nc.useTaintBasedEvictions { nc.markNodeAsHealthy(added[i]) } else { @@ -830,7 +839,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1. var err error var gracePeriod time.Duration var observedReadyCondition v1.NodeCondition - _, currentReadyCondition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) + _, currentReadyCondition := v1node.GetNodeCondition(&node.Status, v1.NodeReady) if currentReadyCondition == nil { // If ready condition is nil, then kubelet (or nodecontroller) never posted node status. 
// A fake ready condition is created, where LastProbeTime and LastTransitionTime is set @@ -870,9 +879,9 @@ func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1. // if that's the case, but it does not seem necessary. var savedCondition *v1.NodeCondition if found { - _, savedCondition = nodeutil.GetNodeCondition(&savedNodeStatus.status, v1.NodeReady) + _, savedCondition = v1node.GetNodeCondition(&savedNodeStatus.status, v1.NodeReady) } - _, observedCondition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) + _, observedCondition := v1node.GetNodeCondition(&node.Status, v1.NodeReady) if !found { glog.Warningf("Missing timestamp for Node %s. Assuming now as a timestamp.", node.Name) savedNodeStatus = nodeStatusData{ @@ -949,7 +958,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1. remainingNodeConditionTypes := []v1.NodeConditionType{v1.NodeOutOfDisk, v1.NodeMemoryPressure, v1.NodeDiskPressure} nowTimestamp := nc.now() for _, nodeConditionType := range remainingNodeConditionTypes { - _, currentCondition := nodeutil.GetNodeCondition(&node.Status, nodeConditionType) + _, currentCondition := v1node.GetNodeCondition(&node.Status, nodeConditionType) if currentCondition == nil { glog.V(2).Infof("Condition %v of node %v was never updated by kubelet", nodeConditionType, node.Name) node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{ @@ -972,7 +981,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1. 
} } - _, currentCondition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) + _, currentCondition := v1node.GetNodeCondition(&node.Status, v1.NodeReady) if !apiequality.Semantic.DeepEqual(currentCondition, &observedReadyCondition) { if _, err = nc.kubeClient.Core().Nodes().UpdateStatus(node); err != nil { glog.Errorf("Error updating node %s: %v", node.Name, err) @@ -991,21 +1000,32 @@ func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1. return gracePeriod, observedReadyCondition, currentReadyCondition, err } -func (nc *NodeController) checkForNodeAddedDeleted(nodes []*v1.Node) (added, deleted []*v1.Node) { - for i := range nodes { - if _, has := nc.knownNodeSet[nodes[i].Name]; !has { - added = append(added, nodes[i]) +// classifyNodes classifies the allNodes to three categories: +// 1. added: the nodes that in 'allNodes', but not in 'knownNodeSet' +// 2. deleted: the nodes that in 'knownNodeSet', but not in 'allNodes' +// 3. newZoneRepresentatives: the nodes that in both 'knownNodeSet' and 'allNodes', but no zone states +func (nc *NodeController) classifyNodes(allNodes []*v1.Node) (added, deleted, newZoneRepresentatives []*v1.Node) { + for i := range allNodes { + if _, has := nc.knownNodeSet[allNodes[i].Name]; !has { + added = append(added, allNodes[i]) + } else { + // Currently, we only consider new zone as updated. + zone := utilnode.GetZoneKey(allNodes[i]) + if _, found := nc.zoneStates[zone]; !found { + newZoneRepresentatives = append(newZoneRepresentatives, allNodes[i]) + } } } + // If there's a difference between lengths of known Nodes and observed nodes // we must have removed some Node. 
- if len(nc.knownNodeSet)+len(added) != len(nodes) { + if len(nc.knownNodeSet)+len(added) != len(allNodes) { knowSetCopy := map[string]*v1.Node{} for k, v := range nc.knownNodeSet { knowSetCopy[k] = v } - for i := range nodes { - delete(knowSetCopy, nodes[i].Name) + for i := range allNodes { + delete(knowSetCopy, allNodes[i].Name) } for i := range knowSetCopy { deleted = append(deleted, knowSetCopy[i]) diff --git a/pkg/controller/node/nodecontroller_test.go b/pkg/controller/node/nodecontroller_test.go index c3b5ef9c393..c5abe514773 100644 --- a/pkg/controller/node/nodecontroller_test.go +++ b/pkg/controller/node/nodecontroller_test.go @@ -133,6 +133,10 @@ func syncNodeStore(nc *nodeController, fakeNodeHandler *testutil.FakeNodeHandler func TestMonitorNodeStatusEvictPods(t *testing.T) { fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC) evictionTimeout := 10 * time.Minute + labels := map[string]string{ + kubeletapis.LabelZoneRegion: "region1", + kubeletapis.LabelZoneFailureDomain: "zone1", + } // Because of the logic that prevents NC from evicting anything when all Nodes are NotReady // we need second healthy node in tests. Because of how the tests are written we need to update @@ -202,6 +206,42 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) { expectedEvictPods: false, description: "Node created recently, with no status.", }, + // Node created recently without FailureDomain labels which is added back later, with no status (happens only at cluster startup). 
+ { + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + CreationTimestamp: fakeNow, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionTrue, + LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}), + }, + daemonSets: nil, + timeToPass: 0, + newNodeStatus: v1.NodeStatus{}, + secondNodeNewStatus: healthyNodeNewStatus, + expectedEvictPods: false, + description: "Node created recently without FailureDomain labels which is added back later, with no status (happens only at cluster startup).", + }, // Node created long time ago, and kubelet posted NotReady for a short period of time. 
{ fakeNodeHandler: &testutil.FakeNodeHandler{ @@ -584,6 +624,10 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) { item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus item.fakeNodeHandler.Existing[1].Status = item.secondNodeNewStatus } + if len(item.fakeNodeHandler.Existing[0].Labels) == 0 && len(item.fakeNodeHandler.Existing[1].Labels) == 0 { + item.fakeNodeHandler.Existing[0].Labels = labels + item.fakeNodeHandler.Existing[1].Labels = labels + } if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } @@ -1266,29 +1310,19 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) { if err := nodeController.monitorNodeStatus(); err != nil { t.Errorf("%v: unexpected error: %v", item.description, err) } - // Give some time for rate-limiter to reload - time.Sleep(500 * time.Millisecond) - for zone, state := range item.expectedFollowingStates { if state != nodeController.zoneStates[zone] { t.Errorf("%v: Unexpected zone state: %v: %v instead %v", item.description, zone, nodeController.zoneStates[zone], state) } } - zones := testutil.GetZones(fakeNodeHandler) - for _, zone := range zones { - // Time for rate-limiter reloading per node. - time.Sleep(50 * time.Millisecond) - nodeController.zonePodEvictor[zone].Try(func(value TimedValue) (bool, time.Duration) { - uid, _ := value.UID.(string) - deletePods(fakeNodeHandler, nodeController.recorder, value.Value, uid, nodeController.daemonSetStore) - return true, 0 - }) - } - - podEvicted := false - for _, action := range fakeNodeHandler.Actions() { - if action.GetVerb() == "delete" && action.GetResource().Resource == "pods" { - podEvicted = true + var podEvicted bool + start := time.Now() + // Infinite loop, used for retrying in case ratelimiter fails to reload for Try function. + // this breaks when we have the status that we need for test case or when we don't see the + // intended result after 1 minute. 
+ for { + podEvicted = nodeController.doEviction(fakeNodeHandler) + if podEvicted == item.expectedEvictPods || time.Since(start) > 1*time.Minute { break } } @@ -1299,6 +1333,27 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) { } } +// doEviction does the fake eviction and returns the status of eviction operation. +func (nc *nodeController) doEviction(fakeNodeHandler *testutil.FakeNodeHandler) bool { + var podEvicted bool + zones := testutil.GetZones(fakeNodeHandler) + for _, zone := range zones { + nc.zonePodEvictor[zone].Try(func(value TimedValue) (bool, time.Duration) { + uid, _ := value.UID.(string) + deletePods(fakeNodeHandler, nc.recorder, value.Value, uid, nc.daemonSetStore) + return true, 0 + }) + } + + for _, action := range fakeNodeHandler.Actions() { + if action.GetVerb() == "delete" && action.GetResource().Resource == "pods" { + podEvicted = true + return podEvicted + } + } + return podEvicted +} + // TestCloudProviderNoRateLimit tests that monitorNodes() immediately deletes // pods and the node when kubelet has not reported, and the cloudprovider says // the node is gone. 
diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go index 7971dd98050..de6134e410b 100644 --- a/pkg/controller/podautoscaler/horizontal.go +++ b/pkg/controller/podautoscaler/horizontal.go @@ -450,14 +450,14 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho case desiredReplicas > scaleUpLimit: setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "ScaleUpLimit", "the desired replica count is increasing faster than the maximum scale rate") desiredReplicas = scaleUpLimit - case desiredReplicas == 0: - // never scale down to 0, reserved for disabling autoscaling - setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooFewReplicas", "the desired replica count was zero") - desiredReplicas = 1 case hpa.Spec.MinReplicas != nil && desiredReplicas < *hpa.Spec.MinReplicas: // make sure we aren't below our minimum setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooFewReplicas", "the desired replica count was less than the minimum replica count") desiredReplicas = *hpa.Spec.MinReplicas + case desiredReplicas == 0: + // never scale down to 0, reserved for disabling autoscaling + setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooFewReplicas", "the desired replica count was zero") + desiredReplicas = 1 case desiredReplicas > hpa.Spec.MaxReplicas: // make sure we aren't above our maximum setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooManyReplicas", "the desired replica count was more than the maximum replica count") diff --git a/pkg/controller/podautoscaler/horizontal_test.go b/pkg/controller/podautoscaler/horizontal_test.go index ff5b7172337..6fe9371d60e 100644 --- a/pkg/controller/podautoscaler/horizontal_test.go +++ b/pkg/controller/podautoscaler/horizontal_test.go @@ -994,6 +994,25 @@ func TestMinReplicas(t *testing.T) { tc.runTest(t) } +func TestMinReplicasDesiredZero(t *testing.T) { + tc := testCase{ + 
minReplicas: 2, + maxReplicas: 5, + initialReplicas: 3, + desiredReplicas: 2, + CPUTarget: 90, + reportedLevels: []uint64{0, 0, 0}, + reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")}, + useMetricsApi: true, + expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{ + Type: autoscalingv2.ScalingLimited, + Status: v1.ConditionTrue, + Reason: "TooFewReplicas", + }), + } + tc.runTest(t) +} + func TestZeroReplicas(t *testing.T) { tc := testCase{ minReplicas: 3, diff --git a/pkg/controller/podautoscaler/replica_calculator.go b/pkg/controller/podautoscaler/replica_calculator.go index b96cd59fcbc..361008583a5 100644 --- a/pkg/controller/podautoscaler/replica_calculator.go +++ b/pkg/controller/podautoscaler/replica_calculator.go @@ -250,9 +250,6 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet // re-run the utilization calculation with our new numbers newUsageRatio, _ := metricsclient.GetMetricUtilizationRatio(metrics, targetUtilization) - if err != nil { - return 0, utilization, err - } if math.Abs(1.0-newUsageRatio) <= tolerance || (usageRatio < 1.0 && newUsageRatio > 1.0) || (usageRatio > 1.0 && newUsageRatio < 1.0) { // return the current replicas if the change would be too small, diff --git a/pkg/controller/replicaset/OWNERS b/pkg/controller/replicaset/OWNERS index 34b609fb948..cd1992c3e67 100755 --- a/pkg/controller/replicaset/OWNERS +++ b/pkg/controller/replicaset/OWNERS @@ -2,9 +2,7 @@ approvers: - caesarxuchao - kargakis - lavalamp -- bprashanth reviewers: - caesarxuchao - kargakis - lavalamp -- bprashanth diff --git a/pkg/controller/replication/OWNERS b/pkg/controller/replication/OWNERS index 34b609fb948..cd1992c3e67 100755 --- a/pkg/controller/replication/OWNERS +++ b/pkg/controller/replication/OWNERS @@ -2,9 +2,7 @@ approvers: - caesarxuchao - kargakis - lavalamp -- bprashanth reviewers: - caesarxuchao - kargakis - lavalamp 
-- bprashanth diff --git a/pkg/controller/service/OWNERS b/pkg/controller/service/OWNERS index 844240a9b29..5e99c8ba0f8 100644 --- a/pkg/controller/service/OWNERS +++ b/pkg/controller/service/OWNERS @@ -1,5 +1,4 @@ reviewers: -- bprashanth - bowei - MrHohn - thockin diff --git a/pkg/controller/statefulset/OWNERS b/pkg/controller/statefulset/OWNERS index 4ff17cf2c72..389a4766b63 100755 --- a/pkg/controller/statefulset/OWNERS +++ b/pkg/controller/statefulset/OWNERS @@ -1,5 +1,4 @@ approvers: -- bprashanth - enisoc - foxish - janetkuo @@ -7,7 +6,6 @@ approvers: - kow3ns - smarterclayton reviewers: -- bprashanth - enisoc - foxish - janetkuo diff --git a/pkg/controller/volume/attachdetach/reconciler/reconciler.go b/pkg/controller/volume/attachdetach/reconciler/reconciler.go index 732638a3aed..d70a59db1fb 100644 --- a/pkg/controller/volume/attachdetach/reconciler/reconciler.go +++ b/pkg/controller/volume/attachdetach/reconciler/reconciler.go @@ -35,7 +35,7 @@ import ( "k8s.io/kubernetes/pkg/volume/util/operationexecutor" ) -// Reconciler runs a periodic loop to reconcile the desired state of the with +// Reconciler runs a periodic loop to reconcile the desired state of the world with // the actual state of the world by triggering attach detach operations. // Note: This is distinct from the Reconciler implemented by the kubelet volume // manager. This reconciles state for the attach/detach controller. That diff --git a/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go b/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go index ba15e8d5261..be158e65c48 100644 --- a/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go +++ b/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go @@ -364,11 +364,7 @@ func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteMany(t *testing. 
waitForTotalAttachCallCount(t, 2 /* expectedAttachCallCount */, fakePlugin) verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin) waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin) - - nodesForVolume := asw.GetNodesForVolume(generatedVolumeName) - if len(nodesForVolume) != 2 { - t.Fatal("Volume was not attached to both nodes") - } + waitForAttachedToNodesCount(t, 2 /* expectedNodeCount */, generatedVolumeName, asw) // Act dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1) @@ -455,13 +451,9 @@ func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteOnce(t *testing. waitForTotalAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin) verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin) waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin) + waitForAttachedToNodesCount(t, 1 /* expectedNodeCount */, generatedVolumeName, asw) nodesForVolume := asw.GetNodesForVolume(generatedVolumeName) - if len(nodesForVolume) == 0 { - t.Fatal("Volume was not attached to any node") - } else if len(nodesForVolume) != 1 { - t.Fatal("Volume was attached to multiple nodes") - } // Act podToDelete := "" @@ -688,6 +680,39 @@ func waitForTotalDetachCallCount( } } +func waitForAttachedToNodesCount( + t *testing.T, + expectedNodeCount int, + volumeName v1.UniqueVolumeName, + asw cache.ActualStateOfWorld) { + + err := retryWithExponentialBackOff( + time.Duration(5*time.Millisecond), + func() (bool, error) { + count := len(asw.GetNodesForVolume(volumeName)) + if count == expectedNodeCount { + return true, nil + } + t.Logf( + "Warning: Wrong number of nodes having <%v> attached. Expected: <%v> Actual: <%v>. Will retry.", + volumeName, + expectedNodeCount, + count) + + return false, nil + }, + ) + + if err != nil { + count := len(asw.GetNodesForVolume(volumeName)) + t.Fatalf( + "Wrong number of nodes having <%v> attached. 
Expected: <%v> Actual: <%v>", + volumeName, + expectedNodeCount, + count) + } +} + func verifyNewAttacherCallCount( t *testing.T, expectZeroNewAttacherCallCount bool, diff --git a/pkg/credentialprovider/BUILD b/pkg/credentialprovider/BUILD index e773b41bf08..00778151553 100644 --- a/pkg/credentialprovider/BUILD +++ b/pkg/credentialprovider/BUILD @@ -19,7 +19,7 @@ go_library( ], tags = ["automanaged"], deps = [ - "//vendor/github.com/docker/engine-api/types:go_default_library", + "//vendor/github.com/docker/docker/api/types:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", @@ -35,7 +35,7 @@ go_test( ], library = ":go_default_library", tags = ["automanaged"], - deps = ["//vendor/github.com/docker/engine-api/types:go_default_library"], + deps = ["//vendor/github.com/docker/docker/api/types:go_default_library"], ) filegroup( diff --git a/pkg/credentialprovider/azure/BUILD b/pkg/credentialprovider/azure/BUILD index 41b2154ad53..aacd43a55aa 100644 --- a/pkg/credentialprovider/azure/BUILD +++ b/pkg/credentialprovider/azure/BUILD @@ -16,10 +16,10 @@ go_library( "//pkg/cloudprovider/providers/azure:go_default_library", "//pkg/credentialprovider:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/gopkg.in/yaml.v2:go_default_library", ], ) diff --git a/pkg/credentialprovider/azure/azure_credentials.go b/pkg/credentialprovider/azure/azure_credentials.go index 771c41780b5..257cbee5b81 100644 --- a/pkg/credentialprovider/azure/azure_credentials.go +++ b/pkg/credentialprovider/azure/azure_credentials.go @@ -17,12 +17,12 @@ 
limitations under the License. package azure import ( - "io/ioutil" + "io" + "os" "time" - yaml "gopkg.in/yaml.v2" - "github.com/Azure/azure-sdk-for-go/arm/containerregistry" + "github.com/Azure/go-autorest/autorest" azureapi "github.com/Azure/go-autorest/autorest/azure" "github.com/golang/glog" "github.com/spf13/pflag" @@ -45,10 +45,12 @@ func init() { }) } +// RegistriesClient is a testable interface for the ACR client List operation. type RegistriesClient interface { List() (containerregistry.RegistryListResult, error) } +// NewACRProvider parses the specified configFile and returns a DockerConfigProvider func NewACRProvider(configFile *string) credentialprovider.DockerConfigProvider { return &acrProvider{ file: configFile, @@ -57,24 +59,16 @@ func NewACRProvider(configFile *string) credentialprovider.DockerConfigProvider type acrProvider struct { file *string - config azure.Config - environment azureapi.Environment + config *azure.Config + environment *azureapi.Environment registryClient RegistriesClient } -func (a *acrProvider) loadConfig(contents []byte) error { - err := yaml.Unmarshal(contents, &a.config) +func (a *acrProvider) loadConfig(rdr io.Reader) error { + var err error + a.config, a.environment, err = azure.ParseConfig(rdr) if err != nil { - return err - } - - if a.config.Cloud == "" { - a.environment = azureapi.PublicCloud - } else { - a.environment, err = azureapi.EnvironmentFromName(a.config.Cloud) - if err != nil { - return err - } + glog.Errorf("Failed to load azure credential file: %v", err) } return nil } @@ -84,27 +78,21 @@ func (a *acrProvider) Enabled() bool { glog.V(5).Infof("Azure config unspecified, disabling") return false } - contents, err := ioutil.ReadFile(*a.file) + + f, err := os.Open(*a.file) if err != nil { - glog.Errorf("Failed to load azure credential file: %v", err) + glog.Errorf("Failed to load config from file: %s", *a.file) return false } - if err := a.loadConfig(contents); err != nil { - glog.Errorf("Failed to parse azure 
credential file: %v", err) + defer f.Close() + + err = a.loadConfig(f) + if err != nil { + glog.Errorf("Failed to load config from file: %s", *a.file) return false } - oauthConfig, err := a.environment.OAuthConfigForTenant(a.config.TenantID) - if err != nil { - glog.Errorf("Failed to get oauth config: %v", err) - return false - } - - servicePrincipalToken, err := azureapi.NewServicePrincipalToken( - *oauthConfig, - a.config.AADClientID, - a.config.AADClientSecret, - a.environment.ServiceManagementEndpoint) + servicePrincipalToken, err := azure.GetServicePrincipalToken(a.config, a.environment) if err != nil { glog.Errorf("Failed to create service principal token: %v", err) return false @@ -112,7 +100,7 @@ func (a *acrProvider) Enabled() bool { registryClient := containerregistry.NewRegistriesClient(a.config.SubscriptionID) registryClient.BaseURI = a.environment.ResourceManagerEndpoint - registryClient.Authorizer = servicePrincipalToken + registryClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) a.registryClient = registryClient return true diff --git a/pkg/credentialprovider/azure/azure_credentials_test.go b/pkg/credentialprovider/azure/azure_credentials_test.go index 8f697387768..9d966fe6be5 100644 --- a/pkg/credentialprovider/azure/azure_credentials_test.go +++ b/pkg/credentialprovider/azure/azure_credentials_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package azure import ( + "bytes" "testing" "github.com/Azure/azure-sdk-for-go/arm/containerregistry" @@ -66,7 +67,7 @@ func Test(t *testing.T) { provider := &acrProvider{ registryClient: fakeClient, } - provider.loadConfig([]byte(configStr)) + provider.loadConfig(bytes.NewBufferString(configStr)) creds := provider.Provide() diff --git a/pkg/credentialprovider/keyring.go b/pkg/credentialprovider/keyring.go index e87f240ced9..9ec96312577 100644 --- a/pkg/credentialprovider/keyring.go +++ b/pkg/credentialprovider/keyring.go @@ -26,7 +26,7 @@ import ( "github.com/golang/glog" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" ) diff --git a/pkg/credentialprovider/keyring_test.go b/pkg/credentialprovider/keyring_test.go index 771a6b60544..7aef17d7e5e 100644 --- a/pkg/credentialprovider/keyring_test.go +++ b/pkg/credentialprovider/keyring_test.go @@ -22,7 +22,7 @@ import ( "reflect" "testing" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" ) func TestUrlsMatch(t *testing.T) { diff --git a/pkg/credentialprovider/provider.go b/pkg/credentialprovider/provider.go index cb93bd7fb21..419dc43e5df 100644 --- a/pkg/credentialprovider/provider.go +++ b/pkg/credentialprovider/provider.go @@ -22,7 +22,7 @@ import ( "sync" "time" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" "github.com/golang/glog" ) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 6edeaf1527f..0fc991e4e0f 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -28,7 +28,7 @@ const ( // // alpha: v1.X // MyFeature utilfeature.Feature = "MyFeature" - // owner: @timstclair + // owner: @tallclair // beta: v1.4 AppArmor utilfeature.Feature = "AppArmor" @@ -44,7 +44,7 @@ const ( // alpha: v1.4 DynamicKubeletConfig utilfeature.Feature = 
"DynamicKubeletConfig" - // owner: timstclair + // owner: tallclair // alpha: v1.5 // // StreamingProxyRedirects controls whether the apiserver should intercept (and follow) diff --git a/pkg/fieldpath/fieldpath.go b/pkg/fieldpath/fieldpath.go index 4001da7997c..caf0096ca0e 100644 --- a/pkg/fieldpath/fieldpath.go +++ b/pkg/fieldpath/fieldpath.go @@ -51,6 +51,8 @@ func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) return accessor.GetName(), nil case "metadata.namespace": return accessor.GetNamespace(), nil + case "metadata.uid": + return string(accessor.GetUID()), nil } return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath) diff --git a/pkg/generated/bindata.go b/pkg/generated/bindata.go index 7312705f6c0..3b46cac4b29 100644 --- a/pkg/generated/bindata.go +++ b/pkg/generated/bindata.go @@ -3075,18 +3075,18 @@ msgstr "Describe one or many contexts" # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top_node.go#L77 #: pkg/kubectl/cmd/top_node.go:78 -msgid "Display Resource (CPU/Memory/Storage) usage of nodes" -msgstr "Display Resource (CPU/Memory/Storage) usage of nodes" +msgid "Display Resource (CPU/Memory) usage of nodes" +msgstr "Display Resource (CPU/Memory) usage of nodes" # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top_pod.go#L79 #: pkg/kubectl/cmd/top_pod.go:80 -msgid "Display Resource (CPU/Memory/Storage) usage of pods" -msgstr "Display Resource (CPU/Memory/Storage) usage of pods" +msgid "Display Resource (CPU/Memory) usage of pods" +msgstr "Display Resource (CPU/Memory) usage of pods" # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top.go#L43 #: pkg/kubectl/cmd/top.go:44 -msgid "Display Resource (CPU/Memory/Storage) usage." -msgstr "Display Resource (CPU/Memory/Storage) usage." +msgid "Display Resource (CPU/Memory) usage." +msgstr "Display Resource (CPU/Memory) usage." 
# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo.go#L49 #: pkg/kubectl/cmd/clusterinfo.go:51 @@ -6551,18 +6551,18 @@ msgstr "Describe one or many contexts" # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top_node.go#L77 #: pkg/kubectl/cmd/top_node.go:78 -msgid "Display Resource (CPU/Memory/Storage) usage of nodes" -msgstr "Display Resource (CPU/Memory/Storage) usage of nodes" +msgid "Display Resource (CPU/Memory) usage of nodes" +msgstr "Display Resource (CPU/Memory) usage of nodes" # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top_pod.go#L79 #: pkg/kubectl/cmd/top_pod.go:80 -msgid "Display Resource (CPU/Memory/Storage) usage of pods" -msgstr "Display Resource (CPU/Memory/Storage) usage of pods" +msgid "Display Resource (CPU/Memory) usage of pods" +msgstr "Display Resource (CPU/Memory) usage of pods" # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top.go#L43 #: pkg/kubectl/cmd/top.go:44 -msgid "Display Resource (CPU/Memory/Storage) usage." -msgstr "Display Resource (CPU/Memory/Storage) usage." +msgid "Display Resource (CPU/Memory) usage." +msgstr "Display Resource (CPU/Memory) usage." # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo.go#L49 #: pkg/kubectl/cmd/clusterinfo.go:51 @@ -9105,15 +9105,15 @@ msgid "Describe one or many contexts" msgstr "" #: pkg/kubectl/cmd/top_node.go:78 -msgid "Display Resource (CPU/Memory/Storage) usage of nodes" +msgid "Display Resource (CPU/Memory) usage of nodes" msgstr "" #: pkg/kubectl/cmd/top_pod.go:80 -msgid "Display Resource (CPU/Memory/Storage) usage of pods" +msgid "Display Resource (CPU/Memory) usage of pods" msgstr "" #: pkg/kubectl/cmd/top.go:44 -msgid "Display Resource (CPU/Memory/Storage) usage." +msgid "Display Resource (CPU/Memory) usage." 
msgstr "" #: pkg/kubectl/cmd/clusterinfo.go:51 diff --git a/pkg/generated/openapi/BUILD b/pkg/generated/openapi/BUILD index e865271f284..c9c082f1fa6 100644 --- a/pkg/generated/openapi/BUILD +++ b/pkg/generated/openapi/BUILD @@ -42,6 +42,7 @@ openapi_library( "k8s.io/apimachinery/pkg/api/resource", "k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/apimachinery/pkg/apis/meta/v1alpha1", + "k8s.io/apimachinery/pkg/apis/testapigroup/v1", "k8s.io/apimachinery/pkg/runtime", "k8s.io/apimachinery/pkg/util/intstr", "k8s.io/apimachinery/pkg/version", diff --git a/pkg/kubectl/BUILD b/pkg/kubectl/BUILD index 9e953c04dde..af9c5799da1 100644 --- a/pkg/kubectl/BUILD +++ b/pkg/kubectl/BUILD @@ -20,6 +20,7 @@ go_test( "namespace_test.go", "proxy_server_test.go", "quota_test.go", + "resource_filter_test.go", "rolebinding_test.go", "rolling_updater_test.go", "rollout_status_test.go", @@ -51,6 +52,7 @@ go_test( "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:go_default_library", "//pkg/kubectl/util:go_default_library", + "//pkg/printers:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", @@ -143,10 +145,9 @@ go_library( "//pkg/credentialprovider:go_default_library", "//pkg/kubectl/resource:go_default_library", "//pkg/kubectl/util:go_default_library", + "//pkg/kubectl/util/slice:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", - "//pkg/util:go_default_library", - "//pkg/util/slice:go_default_library", "//vendor/github.com/emicklei/go-restful-swagger12:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", diff --git a/pkg/kubectl/cmd/BUILD b/pkg/kubectl/cmd/BUILD index aabaeec1885..cf23568467f 
100644 --- a/pkg/kubectl/cmd/BUILD +++ b/pkg/kubectl/cmd/BUILD @@ -94,14 +94,13 @@ go_library( "//pkg/kubectl/plugins:go_default_library", "//pkg/kubectl/resource:go_default_library", "//pkg/kubectl/util:go_default_library", - "//pkg/kubelet/types:go_default_library", + "//pkg/kubectl/util/term:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", "//pkg/util/exec:go_default_library", "//pkg/util/i18n:go_default_library", "//pkg/util/interrupt:go_default_library", "//pkg/util/taints:go_default_library", - "//pkg/util/term:go_default_library", "//pkg/version:go_default_library", "//vendor/github.com/daviddengcn/go-colortext:go_default_library", "//vendor/github.com/docker/distribution/reference:go_default_library", @@ -132,7 +131,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/remotecommand:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", @@ -146,6 +144,7 @@ go_library( "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/tools/portforward:go_default_library", "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + "//vendor/k8s.io/client-go/transport/spdy:go_default_library", ], ) @@ -214,11 +213,11 @@ go_test( "//pkg/kubectl/cmd/util/openapi:go_default_library", "//pkg/kubectl/plugins:go_default_library", "//pkg/kubectl/resource:go_default_library", + "//pkg/kubectl/util/term:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", "//pkg/util/i18n:go_default_library", 
"//pkg/util/strings:go_default_library", - "//pkg/util/term:go_default_library", "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/pkg/kubectl/cmd/annotate.go b/pkg/kubectl/cmd/annotate.go index ed36be72385..b72aa0a9060 100644 --- a/pkg/kubectl/cmd/annotate.go +++ b/pkg/kubectl/cmd/annotate.go @@ -117,10 +117,10 @@ func NewCmdAnnotate(f cmdutil.Factory, out io.Writer) *cobra.Command { Example: annotateExample, Run: func(cmd *cobra.Command, args []string) { if err := options.Complete(out, cmd, args); err != nil { - cmdutil.CheckErr(cmdutil.UsageError(cmd, err.Error())) + cmdutil.CheckErr(cmdutil.UsageErrorf(cmd, "%v", err)) } if err := options.Validate(); err != nil { - cmdutil.CheckErr(cmdutil.UsageError(cmd, err.Error())) + cmdutil.CheckErr(cmdutil.UsageErrorf(cmd, "%v", err)) } cmdutil.CheckErr(options.RunAnnotate(f, cmd)) }, @@ -167,7 +167,7 @@ func (o *AnnotateOptions) Complete(out io.Writer, cmd *cobra.Command, args []str // Validate checks to the AnnotateOptions to see if there is sufficient information run the command. 
func (o AnnotateOptions) Validate() error { - if len(o.resources) < 1 && cmdutil.IsFilenameEmpty(o.Filenames) { + if len(o.resources) < 1 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { return fmt.Errorf("one or more resources must be specified as or /") } if len(o.newAnnotations) < 1 && len(o.removeAnnotations) < 1 { @@ -225,7 +225,7 @@ func (o AnnotateOptions) RunAnnotate(f cmdutil.Factory, cmd *cobra.Command) erro var outputObj runtime.Object var obj runtime.Object - obj, err = cmdutil.MaybeConvertObject(info.Object, info.Mapping.GroupVersionKind.GroupVersion(), info.Mapping) + obj, err = info.Mapping.ConvertToVersion(info.Object, info.Mapping.GroupVersionKind.GroupVersion()) if err != nil { return err } diff --git a/pkg/kubectl/cmd/apply.go b/pkg/kubectl/cmd/apply.go index 7618a0368d3..ba3192c37bd 100644 --- a/pkg/kubectl/cmd/apply.go +++ b/pkg/kubectl/cmd/apply.go @@ -56,6 +56,7 @@ type ApplyOptions struct { GracePeriod int PruneResources []pruneResource Timeout time.Duration + cmdBaseName string } const ( @@ -65,8 +66,6 @@ const ( backOffPeriod = 1 * time.Second // how many times we can retry before back off triesBeforeBackOff = 1 - - warningNoLastAppliedConfigAnnotation = "Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply\n" ) var ( @@ -92,11 +91,17 @@ var ( # Apply the configuration in manifest.yaml and delete all the other configmaps that are not in the file. kubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/ConfigMap`)) + + warningNoLastAppliedConfigAnnotation = "Warning: %[1]s apply should be used on resource created by either %[1]s create --save-config or %[1]s apply\n" ) -func NewCmdApply(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command { +func NewCmdApply(baseName string, f cmdutil.Factory, out, errOut io.Writer) *cobra.Command { var options ApplyOptions + // Store baseName for use in printing warnings / messages involving the base command name. 
+ // This is useful for downstream command that wrap this one. + options.cmdBaseName = baseName + cmd := &cobra.Command{ Use: "apply -f FILENAME", Short: i18n.T("Apply a configuration to a resource by filename or stdin"), @@ -137,7 +142,7 @@ func NewCmdApply(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command { func validateArgs(cmd *cobra.Command, args []string) error { if len(args) != 0 { - return cmdutil.UsageError(cmd, "Unexpected args: %v", args) + return cmdutil.UsageErrorf(cmd, "Unexpected args: %v", args) } return nil @@ -299,7 +304,7 @@ func RunApply(f cmdutil.Factory, cmd *cobra.Command, out, errOut io.Writer, opti return err } if _, ok := annotationMap[api.LastAppliedConfigAnnotation]; !ok { - fmt.Fprintf(errOut, warningNoLastAppliedConfigAnnotation) + fmt.Fprintf(errOut, warningNoLastAppliedConfigAnnotation, options.cmdBaseName) } overwrite := cmdutil.GetFlagBool(cmd, "overwrite") helper := resource.NewHelper(info.Client, info.Mapping) diff --git a/pkg/kubectl/cmd/apply_set_last_applied.go b/pkg/kubectl/cmd/apply_set_last_applied.go index 188e54b09b7..cf967578b19 100644 --- a/pkg/kubectl/cmd/apply_set_last_applied.go +++ b/pkg/kubectl/cmd/apply_set_last_applied.go @@ -161,7 +161,7 @@ func (o *SetLastAppliedOptions) Validate(f cmdutil.Factory, cmd *cobra.Command) return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%v\nfrom server for:", info), info.Source, err) } if oringalBuf == nil && !o.CreateAnnotation { - return cmdutil.UsageError(cmd, "no last-applied-configuration annotation found on resource: %s, to create the annotation, run the command with --create-annotation", info.Name) + return cmdutil.UsageErrorf(cmd, "no last-applied-configuration annotation found on resource: %s, to create the annotation, run the command with --create-annotation", info.Name) } //only add to PatchBufferList when changed diff --git a/pkg/kubectl/cmd/apply_test.go b/pkg/kubectl/cmd/apply_test.go index 838624f38ba..e166edae0c8 100644 --- 
a/pkg/kubectl/cmd/apply_test.go +++ b/pkg/kubectl/cmd/apply_test.go @@ -47,7 +47,7 @@ func TestApplyExtraArgsFail(t *testing.T) { errBuf := bytes.NewBuffer([]byte{}) f, _, _, _ := cmdtesting.NewAPIFactory() - c := NewCmdApply(f, buf, errBuf) + c := NewCmdApply("kubectl", f, buf, errBuf) if validateApplyArgs(c, []string{"rc"}) == nil { t.Fatalf("unexpected non-error") } @@ -55,7 +55,7 @@ func TestApplyExtraArgsFail(t *testing.T) { func validateApplyArgs(cmd *cobra.Command, args []string) error { if len(args) != 0 { - return cmdutil.UsageError(cmd, "Unexpected args: %v", args) + return cmdutil.UsageErrorf(cmd, "Unexpected args: %v", args) } return nil } @@ -377,14 +377,14 @@ func TestApplyObjectWithoutAnnotation(t *testing.T) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) cmd.Flags().Set("filename", filenameRC) cmd.Flags().Set("output", "name") cmd.Run(cmd, []string{}) // uses the name from the file, not the response expectRC := "replicationcontroller/" + nameRC + "\n" - expectWarning := warningNoLastAppliedConfigAnnotation + expectWarning := fmt.Sprintf(warningNoLastAppliedConfigAnnotation, "kubectl") if errBuf.String() != expectWarning { t.Fatalf("unexpected non-warning: %s\nexpected: %s", errBuf.String(), expectWarning) } @@ -422,7 +422,7 @@ func TestApplyObject(t *testing.T) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) cmd.Flags().Set("filename", filenameRC) cmd.Flags().Set("output", "name") cmd.Run(cmd, []string{}) @@ -479,7 +479,7 @@ func TestApplyObjectOutput(t *testing.T) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) cmd.Flags().Set("filename", filenameRC) cmd.Flags().Set("output", "yaml") cmd.Run(cmd, []string{}) @@ -533,7 +533,7 @@ 
func TestApplyRetry(t *testing.T) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) cmd.Flags().Set("filename", filenameRC) cmd.Flags().Set("output", "name") cmd.Run(cmd, []string{}) @@ -578,7 +578,7 @@ func TestApplyNonExistObject(t *testing.T) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) cmd.Flags().Set("filename", filenameRC) cmd.Flags().Set("output", "name") cmd.Run(cmd, []string{}) @@ -636,7 +636,7 @@ func testApplyMultipleObjects(t *testing.T, asList bool) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) if asList { cmd.Flags().Set("filename", filenameRCSVC) } else { @@ -729,7 +729,7 @@ func TestApplyNULLPreservation(t *testing.T) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) cmd.Flags().Set("filename", filenameDeployObjClientside) cmd.Flags().Set("output", "name") @@ -789,7 +789,7 @@ func TestUnstructuredApply(t *testing.T) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) cmd.Flags().Set("filename", filenameWidgetClientside) cmd.Flags().Set("output", "name") cmd.Run(cmd, []string{}) @@ -876,7 +876,7 @@ func TestUnstructuredIdempotentApply(t *testing.T) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) cmd.Flags().Set("filename", filenameWidgetClientside) cmd.Flags().Set("output", "name") cmd.Run(cmd, []string{}) diff --git a/pkg/kubectl/cmd/apply_view_last_applied.go b/pkg/kubectl/cmd/apply_view_last_applied.go index 
033da8b9d94..10e8809d1ee 100644 --- a/pkg/kubectl/cmd/apply_view_last_applied.go +++ b/pkg/kubectl/cmd/apply_view_last_applied.go @@ -142,13 +142,13 @@ func (o *ViewLastAppliedOptions) RunApplyViewLastApplied() error { if err != nil { return err } - fmt.Fprintf(o.Out, string(jsonBuffer.Bytes())) + fmt.Fprint(o.Out, string(jsonBuffer.Bytes())) case "yaml": yamlOutput, err := yaml.JSONToYAML([]byte(str)) if err != nil { return err } - fmt.Fprintf(o.Out, string(yamlOutput)) + fmt.Fprint(o.Out, string(yamlOutput)) } } @@ -166,6 +166,6 @@ func (o *ViewLastAppliedOptions) ValidateOutputArgs(cmd *cobra.Command) error { o.OutputFormat = "yaml" return nil default: - return cmdutil.UsageError(cmd, "Unexpected -o output mode: %s, the flag 'output' must be one of yaml|json", format) + return cmdutil.UsageErrorf(cmd, "Unexpected -o output mode: %s, the flag 'output' must be one of yaml|json", format) } } diff --git a/pkg/kubectl/cmd/attach.go b/pkg/kubectl/cmd/attach.go index bdb3f352def..d6f53884120 100644 --- a/pkg/kubectl/cmd/attach.go +++ b/pkg/kubectl/cmd/attach.go @@ -17,6 +17,7 @@ limitations under the License. 
package cmd import ( + "errors" "fmt" "io" "net/url" @@ -27,7 +28,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" - remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" "k8s.io/kubernetes/pkg/api" @@ -96,17 +96,16 @@ type RemoteAttach interface { type DefaultRemoteAttach struct{} func (*DefaultRemoteAttach) Attach(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool, terminalSizeQueue remotecommand.TerminalSizeQueue) error { - exec, err := remotecommand.NewExecutor(config, method, url) + exec, err := remotecommand.NewSPDYExecutor(config, method, url) if err != nil { return err } return exec.Stream(remotecommand.StreamOptions{ - SupportedProtocols: remotecommandconsts.SupportedStreamingProtocols, - Stdin: stdin, - Stdout: stdout, - Stderr: stderr, - Tty: tty, - TerminalSizeQueue: terminalSizeQueue, + Stdin: stdin, + Stdout: stdout, + Stderr: stderr, + Tty: tty, + TerminalSizeQueue: terminalSizeQueue, }) } @@ -127,10 +126,10 @@ type AttachOptions struct { // Complete verifies command line arguments and loads data from the command environment func (p *AttachOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, argsIn []string) error { if len(argsIn) == 0 { - return cmdutil.UsageError(cmd, "at least one argument is required for attach") + return cmdutil.UsageErrorf(cmd, "at least 1 argument is required for attach") } if len(argsIn) > 2 { - return cmdutil.UsageError(cmd, fmt.Sprintf("expected fewer than three arguments: POD or TYPE/NAME or TYPE NAME, saw %d: %s", len(argsIn), argsIn)) + return cmdutil.UsageErrorf(cmd, "expected POD, TYPE/NAME, or TYPE NAME, (at most 2 arguments) saw %d: %v", len(argsIn), argsIn) } namespace, _, err := f.DefaultNamespace() @@ -140,7 +139,7 @@ func (p *AttachOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, argsIn [ 
p.GetPodTimeout, err = cmdutil.GetPodRunningTimeoutFlag(cmd) if err != nil { - return cmdutil.UsageError(cmd, err.Error()) + return cmdutil.UsageErrorf(cmd, err.Error()) } builder := f.NewBuilder(true). @@ -189,13 +188,13 @@ func (p *AttachOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, argsIn [ func (p *AttachOptions) Validate() error { allErrs := []error{} if len(p.PodName) == 0 { - allErrs = append(allErrs, fmt.Errorf("pod name must be specified")) + allErrs = append(allErrs, errors.New("pod name must be specified")) } if p.Out == nil || p.Err == nil { - allErrs = append(allErrs, fmt.Errorf("both output and error output must be provided")) + allErrs = append(allErrs, errors.New("both output and error output must be provided")) } if p.Attach == nil || p.PodClient == nil || p.Config == nil { - allErrs = append(allErrs, fmt.Errorf("client, client config, and attach must be provided")) + allErrs = append(allErrs, errors.New("client, client config, and attach must be provided")) } return utilerrors.NewAggregate(allErrs) } diff --git a/pkg/kubectl/cmd/autoscale.go b/pkg/kubectl/cmd/autoscale.go index 4d0fadf1e9c..288ee072384 100644 --- a/pkg/kubectl/cmd/autoscale.go +++ b/pkg/kubectl/cmd/autoscale.go @@ -108,7 +108,7 @@ func RunAutoscale(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []s generators := f.Generators("autoscale") generator, found := generators[generatorName] if !found { - return cmdutil.UsageError(cmd, fmt.Sprintf("generator %q not found.", generatorName)) + return cmdutil.UsageErrorf(cmd, "generator %q not found.", generatorName) } names := generator.ParamNames() diff --git a/pkg/kubectl/cmd/certificates.go b/pkg/kubectl/cmd/certificates.go index 2f3b57ba3d4..107a5639779 100644 --- a/pkg/kubectl/cmd/certificates.go +++ b/pkg/kubectl/cmd/certificates.go @@ -59,7 +59,7 @@ func (options *CertificateOptions) Complete(cmd *cobra.Command, args []string) e } func (options *CertificateOptions) Validate() error { - if len(options.csrNames) < 
1 && cmdutil.IsFilenameEmpty(options.Filenames) { + if len(options.csrNames) < 1 && cmdutil.IsFilenameSliceEmpty(options.Filenames) { return fmt.Errorf("one or more CSRs must be specified as or -f ") } return nil diff --git a/pkg/kubectl/cmd/clusterinfo_dump.go b/pkg/kubectl/cmd/clusterinfo_dump.go index 49efaf82b9a..f1d420fd7a4 100644 --- a/pkg/kubectl/cmd/clusterinfo_dump.go +++ b/pkg/kubectl/cmd/clusterinfo_dump.go @@ -91,7 +91,7 @@ func setupOutputWriter(cmd *cobra.Command, defaultWriter io.Writer, filename str func dumpClusterInfo(f cmdutil.Factory, cmd *cobra.Command, out io.Writer) error { timeout, err := cmdutil.GetPodRunningTimeoutFlag(cmd) if err != nil { - return cmdutil.UsageError(cmd, err.Error()) + return cmdutil.UsageErrorf(cmd, err.Error()) } clientset, err := f.ClientSet() diff --git a/pkg/kubectl/cmd/cmd.go b/pkg/kubectl/cmd/cmd.go index 1bafc28778c..7df0f02a2ba 100644 --- a/pkg/kubectl/cmd/cmd.go +++ b/pkg/kubectl/cmd/cmd.go @@ -67,15 +67,6 @@ __kubectl_override_flags() done } -__kubectl_get_namespaces() -{ - local template kubectl_out - template="{{ range .items }}{{ .metadata.name }} {{ end }}" - if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then - COMPREPLY=( $( compgen -W "${kubectl_out[*]}" -- "$cur" ) ) - fi -} - __kubectl_config_get_contexts() { __kubectl_parse_config "contexts" @@ -119,6 +110,11 @@ __kubectl_get_resource() __kubectl_parse_get "${nouns[${#nouns[@]} -1]}" } +__kubectl_get_resource_namespace() +{ + __kubectl_parse_get "namespace" +} + __kubectl_get_resource_pod() { __kubectl_parse_get "pod" @@ -190,6 +186,10 @@ __custom_func() { __kubectl_config_get_contexts return ;; + kubectl_config_delete-cluster) + __kubectl_config_get_clusters + return + ;; *) ;; esac @@ -238,13 +238,12 @@ __custom_func() { * services (aka 'svc') * statefulsets * storageclasses - * thirdpartyresources ` ) var ( bash_completion_flags = map[string]string{ - "namespace": "__kubectl_get_namespaces", + "namespace": 
"__kubectl_get_resource_namespace", "context": "__kubectl_config_get_contexts", "cluster": "__kubectl_config_get_clusters", "user": "__kubectl_config_get_users", @@ -338,7 +337,7 @@ func NewKubectlCommand(f cmdutil.Factory, in io.Reader, out, err io.Writer) *cob { Message: "Advanced Commands:", Commands: []*cobra.Command{ - NewCmdApply(f, out, err), + NewCmdApply("kubectl", f, out, err), NewCmdPatch(f, out), NewCmdReplace(f, out), deprecatedAlias("update", NewCmdReplace(f, out)), diff --git a/pkg/kubectl/cmd/completion.go b/pkg/kubectl/cmd/completion.go index 47ce1cb9547..09f8f5cead4 100644 --- a/pkg/kubectl/cmd/completion.go +++ b/pkg/kubectl/cmd/completion.go @@ -116,14 +116,14 @@ func NewCmdCompletion(out io.Writer, boilerPlate string) *cobra.Command { func RunCompletion(out io.Writer, boilerPlate string, cmd *cobra.Command, args []string) error { if len(args) == 0 { - return cmdutil.UsageError(cmd, "Shell not specified.") + return cmdutil.UsageErrorf(cmd, "Shell not specified.") } if len(args) > 1 { - return cmdutil.UsageError(cmd, "Too many arguments. Expected only the shell type.") + return cmdutil.UsageErrorf(cmd, "Too many arguments. 
Expected only the shell type.") } run, found := completion_shells[args[0]] if !found { - return cmdutil.UsageError(cmd, "Unsupported shell type %q.", args[0]) + return cmdutil.UsageErrorf(cmd, "Unsupported shell type %q.", args[0]) } if len(boilerPlate) == 0 { diff --git a/pkg/kubectl/cmd/config/rename_context.go b/pkg/kubectl/cmd/config/rename_context.go index 7d00eb83ac8..359c58d5d4d 100644 --- a/pkg/kubectl/cmd/config/rename_context.go +++ b/pkg/kubectl/cmd/config/rename_context.go @@ -70,7 +70,7 @@ func NewCmdConfigRenameContext(out io.Writer, configAccess clientcmd.ConfigAcces cmdutil.CheckErr(err) } if err := options.Validate(); err != nil { - cmdutil.UsageError(cmd, err.Error()) + cmdutil.UsageErrorf(cmd, err.Error()) } if err := options.RunRenameContext(out); err != nil { cmdutil.CheckErr(err) diff --git a/pkg/kubectl/cmd/convert.go b/pkg/kubectl/cmd/convert.go index c96e9420bd8..e43b00e67c8 100644 --- a/pkg/kubectl/cmd/convert.go +++ b/pkg/kubectl/cmd/convert.go @@ -122,7 +122,7 @@ func (o *ConvertOptions) Complete(f cmdutil.Factory, out io.Writer, cmd *cobra.C return err } if !api.Registry.IsEnabledVersion(o.outputVersion) { - cmdutil.UsageError(cmd, "'%s' is not a registered version.", o.outputVersion) + cmdutil.UsageErrorf(cmd, "'%s' is not a registered version.", o.outputVersion) } // build the builder diff --git a/pkg/kubectl/cmd/cp.go b/pkg/kubectl/cmd/cp.go index fd9eaeacd29..1e561ea1298 100644 --- a/pkg/kubectl/cmd/cp.go +++ b/pkg/kubectl/cmd/cp.go @@ -18,7 +18,7 @@ package cmd import ( "archive/tar" - "fmt" + "errors" "io" "io/ioutil" "os" @@ -80,13 +80,17 @@ type fileSpec struct { File string } +var errFileSpecDoesntMatchFormat = errors.New("Filespec must match the canonical format: [[namespace/]pod:]file/path") + func extractFileSpec(arg string) (fileSpec, error) { pieces := strings.Split(arg, ":") if len(pieces) == 1 { return fileSpec{File: arg}, nil } if len(pieces) != 2 { - return fileSpec{}, fmt.Errorf("Unexpected fileSpec: %s, expected 
[[namespace/]pod:]file/path", arg) + // FIXME Kubernetes can't copy files that contain a ':' + // character. + return fileSpec{}, errFileSpecDoesntMatchFormat } file := pieces[1] @@ -105,12 +109,12 @@ func extractFileSpec(arg string) (fileSpec, error) { }, nil } - return fileSpec{}, fmt.Errorf("Unexpected file spec: %s, expected [[namespace/]pod:]file/path", arg) + return fileSpec{}, errFileSpecDoesntMatchFormat } func runCopy(f cmdutil.Factory, cmd *cobra.Command, out, cmderr io.Writer, args []string) error { if len(args) != 2 { - return cmdutil.UsageError(cmd, cpUsageStr) + return cmdutil.UsageErrorf(cmd, cpUsageStr) } srcSpec, err := extractFileSpec(args[0]) if err != nil { @@ -126,7 +130,7 @@ func runCopy(f cmdutil.Factory, cmd *cobra.Command, out, cmderr io.Writer, args if len(destSpec.PodName) != 0 { return copyToPod(f, cmd, out, cmderr, srcSpec, destSpec) } - return cmdutil.UsageError(cmd, "One of src or dest must be a remote file specification") + return cmdutil.UsageErrorf(cmd, "One of src or dest must be a remote file specification") } func copyToPod(f cmdutil.Factory, cmd *cobra.Command, stdout, stderr io.Writer, src, dest fileSpec) error { diff --git a/pkg/kubectl/cmd/create.go b/pkg/kubectl/cmd/create.go index 6e0c6c4e428..f3658199e55 100644 --- a/pkg/kubectl/cmd/create.go +++ b/pkg/kubectl/cmd/create.go @@ -64,7 +64,7 @@ func NewCmdCreate(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command { Long: createLong, Example: createExample, Run: func(cmd *cobra.Command, args []string) { - if cmdutil.IsFilenameEmpty(options.FilenameOptions.Filenames) { + if cmdutil.IsFilenameSliceEmpty(options.FilenameOptions.Filenames) { defaultRunFunc := cmdutil.DefaultSubCommandRun(errOut) defaultRunFunc(cmd, args) return @@ -106,7 +106,7 @@ func NewCmdCreate(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command { func ValidateArgs(cmd *cobra.Command, args []string) error { if len(args) != 0 { - return cmdutil.UsageError(cmd, "Unexpected args: %v", args) + return 
cmdutil.UsageErrorf(cmd, "Unexpected args: %v", args) } return nil } @@ -229,7 +229,7 @@ func createAndRefresh(info *resource.Info) error { // NameFromCommandArgs is a utility function for commands that assume the first argument is a resource name func NameFromCommandArgs(cmd *cobra.Command, args []string) (string, error) { if len(args) == 0 { - return "", cmdutil.UsageError(cmd, "NAME is required") + return "", cmdutil.UsageErrorf(cmd, "NAME is required") } return args[0], nil } @@ -241,8 +241,7 @@ type CreateSubcommandOptions struct { // StructuredGenerator is the resource generator for the object being created StructuredGenerator kubectl.StructuredGenerator // DryRun is true if the command should be simulated but not run against the server - DryRun bool - // OutputFormat + DryRun bool OutputFormat string } diff --git a/pkg/kubectl/cmd/create_secret.go b/pkg/kubectl/cmd/create_secret.go index 10c9d51eb1a..0358bb162ef 100644 --- a/pkg/kubectl/cmd/create_secret.go +++ b/pkg/kubectl/cmd/create_secret.go @@ -176,7 +176,7 @@ func CreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra. 
requiredFlags := []string{"docker-username", "docker-password", "docker-email", "docker-server"} for _, requiredFlag := range requiredFlags { if value := cmdutil.GetFlagString(cmd, requiredFlag); len(value) == 0 { - return cmdutil.UsageError(cmd, "flag %s is required", requiredFlag) + return cmdutil.UsageErrorf(cmd, "flag %s is required", requiredFlag) } } var generator kubectl.StructuredGenerator @@ -241,7 +241,7 @@ func CreateSecretTLS(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, ar requiredFlags := []string{"cert", "key"} for _, requiredFlag := range requiredFlags { if value := cmdutil.GetFlagString(cmd, requiredFlag); len(value) == 0 { - return cmdutil.UsageError(cmd, "flag %s is required", requiredFlag) + return cmdutil.UsageErrorf(cmd, "flag %s is required", requiredFlag) } } var generator kubectl.StructuredGenerator diff --git a/pkg/kubectl/cmd/create_service.go b/pkg/kubectl/cmd/create_service.go index 4995427fee4..44a74360c49 100644 --- a/pkg/kubectl/cmd/create_service.go +++ b/pkg/kubectl/cmd/create_service.go @@ -83,7 +83,7 @@ func NewCmdCreateServiceClusterIP(f cmdutil.Factory, cmdOut io.Writer) *cobra.Co } func errUnsupportedGenerator(cmd *cobra.Command, generatorName string) error { - return cmdutil.UsageError(cmd, "Generator %s not supported. ", generatorName) + return cmdutil.UsageErrorf(cmd, "Generator %s not supported. 
", generatorName) } // CreateServiceClusterIP implements the behavior to run the create service clusterIP command diff --git a/pkg/kubectl/cmd/delete.go b/pkg/kubectl/cmd/delete.go index 8e9e178fe62..42ce9353ada 100644 --- a/pkg/kubectl/cmd/delete.go +++ b/pkg/kubectl/cmd/delete.go @@ -137,7 +137,7 @@ func NewCmdDelete(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command { cmdutil.CheckErr(err) } if err := options.Validate(cmd); err != nil { - cmdutil.CheckErr(cmdutil.UsageError(cmd, err.Error())) + cmdutil.CheckErr(cmdutil.UsageErrorf(cmd, err.Error())) } if err := options.RunDelete(); err != nil { cmdutil.CheckErr(err) diff --git a/pkg/kubectl/cmd/describe.go b/pkg/kubectl/cmd/describe.go index ee8b6bd9cfa..f047a42789b 100644 --- a/pkg/kubectl/cmd/describe.go +++ b/pkg/kubectl/cmd/describe.go @@ -110,9 +110,9 @@ func RunDescribe(f cmdutil.Factory, out, cmdErr io.Writer, cmd *cobra.Command, a if allNamespaces { enforceNamespace = false } - if len(args) == 0 && cmdutil.IsFilenameEmpty(options.Filenames) { + if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(options.Filenames) { fmt.Fprint(cmdErr, "You must specify the type of resource to describe. 
", validResources) - return cmdutil.UsageError(cmd, "Required resource not specified.") + return cmdutil.UsageErrorf(cmd, "Required resource not specified.") } builder, err := f.NewUnstructuredBuilder(true) diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go index 1eaaa4cf599..83eb61b1e36 100644 --- a/pkg/kubectl/cmd/drain.go +++ b/pkg/kubectl/cmd/drain.go @@ -20,21 +20,25 @@ import ( "errors" "fmt" "io" + "k8s.io/apimachinery/pkg/util/json" "math" - "reflect" "strings" "time" "github.com/jonboulle/clockwork" "github.com/spf13/cobra" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/wait" restclient "k8s.io/client-go/rest" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" @@ -43,7 +47,6 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/resource" - "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/util/i18n" ) @@ -84,7 +87,6 @@ const ( kLocalStorageWarning = "Deleting pods with local storage" kUnmanagedFatal = "pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet (use --force to override)" kUnmanagedWarning = "Deleting pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet" - kMaxNodeUpdateRetry = 10 ) var ( @@ -197,7 +199,7 @@ func NewCmdDrain(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command { func (o *DrainOptions) SetupDrain(cmd *cobra.Command, args []string) error { var err error if len(args) != 1 { - return cmdutil.UsageError(cmd, fmt.Sprintf("USAGE: %s [flags]", cmd.Use)) + return cmdutil.UsageErrorf(cmd, "USAGE: %s 
[flags]", cmd.Use) } if o.client, err = o.Factory.ClientSet(); err != nil { @@ -353,7 +355,7 @@ func (o *DrainOptions) daemonsetFilter(pod api.Pod) (bool, *warning, *fatal) { } func mirrorPodFilter(pod api.Pod) (bool, *warning, *fatal) { - if _, found := pod.ObjectMeta.Annotations[types.ConfigMirrorAnnotationKey]; found { + if _, found := pod.ObjectMeta.Annotations[corev1.MirrorPodAnnotationKey]; found { return false, nil, nil } return true, nil, nil @@ -621,27 +623,28 @@ func (o *DrainOptions) RunCordonOrUncordon(desired bool) error { } if o.nodeInfo.Mapping.GroupVersionKind.Kind == "Node" { - unsched := reflect.ValueOf(o.nodeInfo.Object).Elem().FieldByName("Spec").FieldByName("Unschedulable") - if unsched.Bool() == desired { + obj, err := o.nodeInfo.Mapping.ConvertToVersion(o.nodeInfo.Object, o.nodeInfo.Mapping.GroupVersionKind.GroupVersion()) + if err != nil { + return err + } + oldData, err := json.Marshal(obj) + node, ok := obj.(*corev1.Node) + if !ok { + return fmt.Errorf("unexpected Type%T, expected Node", obj) + } + unsched := node.Spec.Unschedulable + if unsched == desired { cmdutil.PrintSuccess(o.mapper, false, o.Out, o.nodeInfo.Mapping.Resource, o.nodeInfo.Name, false, already(desired)) } else { helper := resource.NewHelper(o.restClient, o.nodeInfo.Mapping) - unsched.SetBool(desired) + node.Spec.Unschedulable = desired var err error - for i := 0; i < kMaxNodeUpdateRetry; i++ { - // We don't care about what previous versions may exist, we always want - // to overwrite, and Replace always sets current ResourceVersion if version is "". 
- helper.Versioner.SetResourceVersion(o.nodeInfo.Object, "") - _, err = helper.Replace(cmdNamespace, o.nodeInfo.Name, true, o.nodeInfo.Object) - if err != nil { - if !apierrors.IsConflict(err) { - return err - } - } else { - break - } - // It's a race, no need to sleep + newData, err := json.Marshal(obj) + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, obj) + if err != nil { + return err } + _, err = helper.Patch(cmdNamespace, o.nodeInfo.Name, types.StrategicMergePatchType, patchBytes) if err != nil { return err } diff --git a/pkg/kubectl/cmd/drain_test.go b/pkg/kubectl/cmd/drain_test.go index 36ea363f48e..7a28cf62f40 100644 --- a/pkg/kubectl/cmd/drain_test.go +++ b/pkg/kubectl/cmd/drain_test.go @@ -33,11 +33,13 @@ import ( "github.com/spf13/cobra" + "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/rest/fake" "k8s.io/kubernetes/pkg/api" @@ -55,27 +57,27 @@ const ( DeleteMethod = "Delete" ) -var node *api.Node -var cordoned_node *api.Node +var node *v1.Node +var cordoned_node *v1.Node func boolptr(b bool) *bool { return &b } func TestMain(m *testing.M) { // Create a node. - node = &api.Node{ + node = &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node", CreationTimestamp: metav1.Time{Time: time.Now()}, }, - Spec: api.NodeSpec{ + Spec: v1.NodeSpec{ ExternalID: "node", }, - Status: api.NodeStatus{}, + Status: v1.NodeStatus{}, } clone, _ := api.Scheme.DeepCopy(node) // A copy of the same node, but cordoned. 
- cordoned_node = clone.(*api.Node) + cordoned_node = clone.(*v1.Node) cordoned_node.Spec.Unschedulable = true os.Exit(m.Run()) } @@ -83,8 +85,8 @@ func TestMain(m *testing.M) { func TestCordon(t *testing.T) { tests := []struct { description string - node *api.Node - expected *api.Node + node *v1.Node + expected *v1.Node cmd func(cmdutil.Factory, io.Writer) *cobra.Command arg string expectFatal bool @@ -149,7 +151,7 @@ func TestCordon(t *testing.T) { for _, test := range tests { f, tf, codec, ns := cmdtesting.NewAPIFactory() - new_node := &api.Node{} + new_node := &v1.Node{} updated := false tf.Client = &fake.RESTClient{ APIRegistry: api.Registry, @@ -161,17 +163,25 @@ func TestCordon(t *testing.T) { return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, test.node)}, nil case m.isFor("GET", "/nodes/bar"): return &http.Response{StatusCode: 404, Header: defaultHeader(), Body: stringBody("nope")}, nil - case m.isFor("PUT", "/nodes/node"): + case m.isFor("PATCH", "/nodes/node"): data, err := ioutil.ReadAll(req.Body) if err != nil { t.Fatalf("%s: unexpected error: %v", test.description, err) } defer req.Body.Close() - if err := runtime.DecodeInto(codec, data, new_node); err != nil { + oldJSON, err := runtime.Encode(codec, node) + if err != nil { + t.Fatalf("%s: unexpected error: %v", test.description, err) + } + appliedPatch, err := strategicpatch.StrategicMergePatch(oldJSON, data, &v1.Node{}) + if err != nil { + t.Fatalf("%s: unexpected error: %v", test.description, err) + } + if err := runtime.DecodeInto(codec, appliedPatch, new_node); err != nil { t.Fatalf("%s: unexpected error: %v", test.description, err) } if !reflect.DeepEqual(test.expected.Spec, new_node.Spec) { - t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, test.expected.Spec, new_node.Spec) + t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, test.expected.Spec.Unschedulable, new_node.Spec.Unschedulable) } updated = true return &http.Response{StatusCode: 
200, Header: defaultHeader(), Body: objBody(codec, new_node)}, nil @@ -443,8 +453,8 @@ func TestDrain(t *testing.T) { tests := []struct { description string - node *api.Node - expected *api.Node + node *v1.Node + expected *v1.Node pods []api.Pod rcs []api.ReplicationController replicaSets []extensions.ReplicaSet @@ -582,7 +592,7 @@ func TestDrain(t *testing.T) { currMethod = DeleteMethod } for _, test := range tests { - new_node := &api.Node{} + new_node := &v1.Node{} deleted := false evicted := false f, tf, codec, ns := cmdtesting.NewAPIFactory() @@ -649,13 +659,21 @@ func TestDrain(t *testing.T) { return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &api.PodList{Items: test.pods})}, nil case m.isFor("GET", "/replicationcontrollers"): return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &api.ReplicationControllerList{Items: test.rcs})}, nil - case m.isFor("PUT", "/nodes/node"): + case m.isFor("PATCH", "/nodes/node"): data, err := ioutil.ReadAll(req.Body) if err != nil { t.Fatalf("%s: unexpected error: %v", test.description, err) } defer req.Body.Close() - if err := runtime.DecodeInto(codec, data, new_node); err != nil { + oldJSON, err := runtime.Encode(codec, node) + if err != nil { + t.Fatalf("%s: unexpected error: %v", test.description, err) + } + appliedPatch, err := strategicpatch.StrategicMergePatch(oldJSON, data, &v1.Node{}) + if err != nil { + t.Fatalf("%s: unexpected error: %v", test.description, err) + } + if err := runtime.DecodeInto(codec, appliedPatch, new_node); err != nil { t.Fatalf("%s: unexpected error: %v", test.description, err) } if !reflect.DeepEqual(test.expected.Spec, new_node.Spec) { @@ -692,7 +710,6 @@ func TestDrain(t *testing.T) { cmd.SetArgs(test.args) cmd.Execute() }() - if test.expectFatal { if !saw_fatal { t.Fatalf("%s: unexpected non-error when using %s", test.description, currMethod) diff --git a/pkg/kubectl/cmd/edit.go b/pkg/kubectl/cmd/edit.go index 
64270828e1d..7658330eb2b 100644 --- a/pkg/kubectl/cmd/edit.go +++ b/pkg/kubectl/cmd/edit.go @@ -107,6 +107,7 @@ func NewCmdEdit(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command { cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage) cmdutil.AddValidateOptionFlags(cmd, &options.ValidateOptions) cmd.Flags().StringVarP(&options.Output, "output", "o", "yaml", "Output format. One of: yaml|json.") + cmd.Flags().BoolVarP(&options.OutputPatch, "output-patch", "", false, "Output the patch if the resource is edited.") cmd.Flags().BoolVar(&options.WindowsLineEndings, "windows-line-endings", runtime.GOOS == "windows", "Defaults to the line ending native to your platform.") diff --git a/pkg/kubectl/cmd/edit_test.go b/pkg/kubectl/cmd/edit_test.go index 1155efa6ef2..e821793a3d3 100644 --- a/pkg/kubectl/cmd/edit_test.go +++ b/pkg/kubectl/cmd/edit_test.go @@ -49,6 +49,7 @@ type EditTestCase struct { Args []string `yaml:"args"` Filename string `yaml:"filename"` Output string `yaml:"outputFormat"` + OutputPatch string `yaml:"outputPatch"` SaveConfig string `yaml:"saveConfig"` Namespace string `yaml:"namespace"` ExpectedStdout []string `yaml:"expectedStdout"` @@ -250,6 +251,9 @@ func TestEdit(t *testing.T) { if len(testcase.Output) > 0 { cmd.Flags().Set("output", testcase.Output) } + if len(testcase.OutputPatch) > 0 { + cmd.Flags().Set("output-patch", testcase.OutputPatch) + } if len(testcase.SaveConfig) > 0 { cmd.Flags().Set("save-config", testcase.SaveConfig) } diff --git a/pkg/kubectl/cmd/exec.go b/pkg/kubectl/cmd/exec.go index d74d1893efd..76522bdf453 100644 --- a/pkg/kubectl/cmd/exec.go +++ b/pkg/kubectl/cmd/exec.go @@ -25,16 +25,15 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" "k8s.io/kubernetes/pkg/api" coreclient 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/util/term" "k8s.io/kubernetes/pkg/util/i18n" "k8s.io/kubernetes/pkg/util/interrupt" - "k8s.io/kubernetes/pkg/util/term" ) var ( @@ -101,17 +100,16 @@ type RemoteExecutor interface { type DefaultRemoteExecutor struct{} func (*DefaultRemoteExecutor) Execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool, terminalSizeQueue remotecommand.TerminalSizeQueue) error { - exec, err := remotecommand.NewExecutor(config, method, url) + exec, err := remotecommand.NewSPDYExecutor(config, method, url) if err != nil { return err } return exec.Stream(remotecommand.StreamOptions{ - SupportedProtocols: remotecommandconsts.SupportedStreamingProtocols, - Stdin: stdin, - Stdout: stdout, - Stderr: stderr, - Tty: tty, - TerminalSizeQueue: terminalSizeQueue, + Stdin: stdin, + Stdout: stdout, + Stderr: stderr, + Tty: tty, + TerminalSizeQueue: terminalSizeQueue, }) } @@ -152,19 +150,19 @@ type ExecOptions struct { func (p *ExecOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, argsIn []string, argsLenAtDash int) error { // Let kubectl exec follow rules for `--`, see #13004 issue if len(p.PodName) == 0 && (len(argsIn) == 0 || argsLenAtDash == 0) { - return cmdutil.UsageError(cmd, execUsageStr) + return cmdutil.UsageErrorf(cmd, execUsageStr) } if len(p.PodName) != 0 { printDeprecationWarning("exec POD_NAME", "-p POD_NAME") if len(argsIn) < 1 { - return cmdutil.UsageError(cmd, execUsageStr) + return cmdutil.UsageErrorf(cmd, execUsageStr) } p.Command = argsIn } else { p.PodName = argsIn[0] p.Command = argsIn[1:] if len(p.Command) < 1 { - return cmdutil.UsageError(cmd, execUsageStr) + return cmdutil.UsageErrorf(cmd, execUsageStr) } } diff --git a/pkg/kubectl/cmd/exec_test.go b/pkg/kubectl/cmd/exec_test.go index 
8d72c724bc4..6c57bbd80c0 100644 --- a/pkg/kubectl/cmd/exec_test.go +++ b/pkg/kubectl/cmd/exec_test.go @@ -35,7 +35,7 @@ import ( "k8s.io/client-go/tools/remotecommand" "k8s.io/kubernetes/pkg/api" cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing" - "k8s.io/kubernetes/pkg/util/term" + "k8s.io/kubernetes/pkg/kubectl/util/term" ) type fakeRemoteExecutor struct { diff --git a/pkg/kubectl/cmd/explain.go b/pkg/kubectl/cmd/explain.go index 7a8230f3254..e2967e03da4 100644 --- a/pkg/kubectl/cmd/explain.go +++ b/pkg/kubectl/cmd/explain.go @@ -66,10 +66,10 @@ func NewCmdExplain(f cmdutil.Factory, out, cmdErr io.Writer) *cobra.Command { func RunExplain(f cmdutil.Factory, out, cmdErr io.Writer, cmd *cobra.Command, args []string) error { if len(args) == 0 { fmt.Fprint(cmdErr, "You must specify the type of resource to explain. ", validResources) - return cmdutil.UsageError(cmd, "Required resource not specified.") + return cmdutil.UsageErrorf(cmd, "Required resource not specified.") } if len(args) > 1 { - return cmdutil.UsageError(cmd, "We accept only this format: explain RESOURCE") + return cmdutil.UsageErrorf(cmd, "We accept only this format: explain RESOURCE") } recursive := cmdutil.GetFlagBool(cmd, "recursive") diff --git a/pkg/kubectl/cmd/expose.go b/pkg/kubectl/cmd/expose.go index 20580d8b399..140c9a74431 100644 --- a/pkg/kubectl/cmd/expose.go +++ b/pkg/kubectl/cmd/expose.go @@ -17,7 +17,6 @@ limitations under the License. 
package cmd import ( - "fmt" "io" "regexp" "strings" @@ -136,7 +135,7 @@ func RunExpose(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri Do() err = r.Err() if err != nil { - return cmdutil.UsageError(cmd, err.Error()) + return cmdutil.UsageErrorf(cmd, err.Error()) } // Get the generator, setup and validate all required parameters @@ -144,7 +143,7 @@ func RunExpose(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri generators := f.Generators("expose") generator, found := generators[generatorName] if !found { - return cmdutil.UsageError(cmd, fmt.Sprintf("generator %q not found.", generatorName)) + return cmdutil.UsageErrorf(cmd, "generator %q not found.", generatorName) } names := generator.ParamNames() @@ -170,7 +169,7 @@ func RunExpose(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri if s, found := params["selector"]; found && kubectl.IsZero(s) { s, err := f.MapBasedSelectorForObject(info.Object) if err != nil { - return cmdutil.UsageError(cmd, fmt.Sprintf("couldn't retrieve selectors via --selector flag or introspection: %s", err)) + return cmdutil.UsageErrorf(cmd, "couldn't retrieve selectors via --selector flag or introspection: %v", err) } params["selector"] = s } @@ -182,12 +181,12 @@ func RunExpose(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri if port, found := params["port"]; found && kubectl.IsZero(port) { ports, err := f.PortsForObject(info.Object) if err != nil { - return cmdutil.UsageError(cmd, fmt.Sprintf("couldn't find port via --port flag or introspection: %s", err)) + return cmdutil.UsageErrorf(cmd, "couldn't find port via --port flag or introspection: %v", err) } switch len(ports) { case 0: if !isHeadlessService { - return cmdutil.UsageError(cmd, "couldn't find port via --port flag or introspection") + return cmdutil.UsageErrorf(cmd, "couldn't find port via --port flag or introspection") } case 1: params["port"] = ports[0] @@ -201,7 +200,7 @@ func RunExpose(f cmdutil.Factory, out 
io.Writer, cmd *cobra.Command, args []stri if _, found := params["protocol"]; found { protocolsMap, err := f.ProtocolsForObject(info.Object) if err != nil { - return cmdutil.UsageError(cmd, fmt.Sprintf("couldn't find protocol via introspection: %s", err)) + return cmdutil.UsageErrorf(cmd, "couldn't find protocol via introspection: %v", err) } if protocols := kubectl.MakeProtocols(protocolsMap); !kubectl.IsZero(protocols) { params["protocols"] = protocols diff --git a/pkg/kubectl/cmd/get.go b/pkg/kubectl/cmd/get.go index 833b86da611..eb3bea9ab82 100644 --- a/pkg/kubectl/cmd/get.go +++ b/pkg/kubectl/cmd/get.go @@ -51,7 +51,7 @@ type GetOptions struct { } var ( - get_long = templates.LongDesc(` + getLong = templates.LongDesc(` Display one or many resources. ` + validResources + ` @@ -63,7 +63,7 @@ var ( By specifying the output as 'template' and providing a Go template as the value of the --template flag, you can filter the attributes of the fetched resources.`) - get_example = templates.Examples(i18n.T(` + getExample = templates.Examples(i18n.T(` # List all pods in ps output format. kubectl get pods @@ -115,8 +115,8 @@ func NewCmdGet(f cmdutil.Factory, out io.Writer, errOut io.Writer) *cobra.Comman cmd := &cobra.Command{ Use: "get [(-o|--output=)json|yaml|wide|custom-columns=...|custom-columns-file=...|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=...] (TYPE [NAME | -l label] | TYPE/NAME ...) 
[flags]", Short: i18n.T("Display one or many resources"), - Long: get_long, - Example: get_example, + Long: getLong, + Example: getExample, Run: func(cmd *cobra.Command, args []string) { err := RunGet(f, out, errOut, cmd, args, options) cmdutil.CheckErr(err) @@ -182,7 +182,7 @@ func RunGet(f cmdutil.Factory, out, errOut io.Writer, cmd *cobra.Command, args [ enforceNamespace = false } - if len(args) == 0 && cmdutil.IsFilenameEmpty(options.Filenames) { + if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(options.Filenames) { fmt.Fprint(errOut, "You must specify the type of resource to get. ", validResources) fullCmdName := cmd.Parent().CommandPath() @@ -191,7 +191,7 @@ func RunGet(f cmdutil.Factory, out, errOut io.Writer, cmd *cobra.Command, args [ usageString = fmt.Sprintf("%s\nUse \"%s explain \" for a detailed description of that resource (e.g. %[2]s explain pods).", usageString, fullCmdName) } - return cmdutil.UsageError(cmd, usageString) + return cmdutil.UsageErrorf(cmd, usageString) } export := cmdutil.GetFlagBool(cmd, "export") diff --git a/pkg/kubectl/cmd/label.go b/pkg/kubectl/cmd/label.go index 33f2a1e9c33..1d8776f9d83 100644 --- a/pkg/kubectl/cmd/label.go +++ b/pkg/kubectl/cmd/label.go @@ -115,10 +115,10 @@ func NewCmdLabel(f cmdutil.Factory, out io.Writer) *cobra.Command { Example: labelExample, Run: func(cmd *cobra.Command, args []string) { if err := options.Complete(out, cmd, args); err != nil { - cmdutil.CheckErr(cmdutil.UsageError(cmd, err.Error())) + cmdutil.CheckErr(cmdutil.UsageErrorf(cmd, err.Error())) } if err := options.Validate(); err != nil { - cmdutil.CheckErr(cmdutil.UsageError(cmd, err.Error())) + cmdutil.CheckErr(cmdutil.UsageErrorf(cmd, err.Error())) } cmdutil.CheckErr(options.RunLabel(f, cmd)) }, @@ -162,7 +162,7 @@ func (o *LabelOptions) Complete(out io.Writer, cmd *cobra.Command, args []string // Validate checks to the LabelOptions to see if there is sufficient information run the command. 
func (o *LabelOptions) Validate() error { - if len(o.resources) < 1 && cmdutil.IsFilenameEmpty(o.FilenameOptions.Filenames) { + if len(o.resources) < 1 && cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames) { return fmt.Errorf("one or more resources must be specified as or /") } if len(o.newLabels) < 1 && len(o.removeLabels) < 1 { diff --git a/pkg/kubectl/cmd/logs.go b/pkg/kubectl/cmd/logs.go index dc71d58b536..f0fbdd87cd9 100644 --- a/pkg/kubectl/cmd/logs.go +++ b/pkg/kubectl/cmd/logs.go @@ -85,7 +85,7 @@ type LogsOptions struct { Out io.Writer } -// NewCmdLog creates a new pod logs command +// NewCmdLogs creates a new pod logs command func NewCmdLogs(f cmdutil.Factory, out io.Writer) *cobra.Command { o := &LogsOptions{} cmd := &cobra.Command{ @@ -127,21 +127,21 @@ func (o *LogsOptions) Complete(f cmdutil.Factory, out io.Writer, cmd *cobra.Comm switch len(args) { case 0: if len(selector) == 0 { - return cmdutil.UsageError(cmd, logsUsageStr) + return cmdutil.UsageErrorf(cmd, "%s", logsUsageStr) } case 1: o.ResourceArg = args[0] if len(selector) != 0 { - return cmdutil.UsageError(cmd, "only a selector (-l) or a POD name is allowed") + return cmdutil.UsageErrorf(cmd, "only a selector (-l) or a POD name is allowed") } case 2: if cmd.Flag("container").Changed { - return cmdutil.UsageError(cmd, "only one of -c or an inline [CONTAINER] arg is allowed") + return cmdutil.UsageErrorf(cmd, "only one of -c or an inline [CONTAINER] arg is allowed") } o.ResourceArg = args[0] containerName = args[1] default: - return cmdutil.UsageError(cmd, logsUsageStr) + return cmdutil.UsageErrorf(cmd, "%s", logsUsageStr) } var err error o.Namespace, _, err = f.DefaultNamespace() @@ -183,7 +183,7 @@ func (o *LogsOptions) Complete(f cmdutil.Factory, out io.Writer, cmd *cobra.Comm if len(selector) != 0 { if logOptions.Follow { - return cmdutil.UsageError(cmd, "only one of follow (-f) or selector (-l) is allowed") + return cmdutil.UsageErrorf(cmd, "only one of follow (-f) or selector (-l) 
is allowed") } if logOptions.TailLines == nil { logOptions.TailLines = &selectorTail diff --git a/pkg/kubectl/cmd/patch.go b/pkg/kubectl/cmd/patch.go index 96a0b6d3852..431e89619f3 100644 --- a/pkg/kubectl/cmd/patch.go +++ b/pkg/kubectl/cmd/patch.go @@ -136,20 +136,21 @@ func RunPatch(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []strin ok := false patchType, ok = patchTypes[patchTypeString] if !ok { - return cmdutil.UsageError(cmd, fmt.Sprintf("--type must be one of %v, not %q", sets.StringKeySet(patchTypes).List(), patchTypeString)) + return cmdutil.UsageErrorf(cmd, "--type must be one of %v, not %q", + sets.StringKeySet(patchTypes).List(), patchTypeString) } } patch := cmdutil.GetFlagString(cmd, "patch") if len(patch) == 0 { - return cmdutil.UsageError(cmd, "Must specify -p to patch") + return cmdutil.UsageErrorf(cmd, "Must specify -p to patch") } patchBytes, err := yaml.ToJSON([]byte(patch)) if err != nil { return fmt.Errorf("unable to parse %q: %v", patch, err) } - // TODO: fix --local to work with thirdpartyresources without making use of the discovery client. + // TODO: fix --local to work with customresources without making use of the discovery client. 
// https://github.com/kubernetes/kubernetes/issues/46722 builder, err := f.NewUnstructuredBuilder(true) if err != nil { diff --git a/pkg/kubectl/cmd/portforward.go b/pkg/kubectl/cmd/portforward.go index 75e9e33ef37..bcf3e694f4b 100644 --- a/pkg/kubectl/cmd/portforward.go +++ b/pkg/kubectl/cmd/portforward.go @@ -19,6 +19,7 @@ package cmd import ( "fmt" "io" + "net/http" "net/url" "os" "os/signal" @@ -28,7 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/portforward" - "k8s.io/client-go/tools/remotecommand" + "k8s.io/client-go/transport/spdy" "k8s.io/kubernetes/pkg/api" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" @@ -81,7 +82,7 @@ func NewCmdPortForward(f cmdutil.Factory, cmdOut, cmdErr io.Writer) *cobra.Comma cmdutil.CheckErr(err) } if err := opts.Validate(); err != nil { - cmdutil.CheckErr(cmdutil.UsageError(cmd, err.Error())) + cmdutil.CheckErr(cmdutil.UsageErrorf(cmd, "%v", err.Error())) } if err := opts.RunPortForward(); err != nil { cmdutil.CheckErr(err) @@ -102,10 +103,11 @@ type defaultPortForwarder struct { } func (f *defaultPortForwarder) ForwardPorts(method string, url *url.URL, opts PortForwardOptions) error { - dialer, err := remotecommand.NewExecutor(opts.Config, method, url) + transport, upgrader, err := spdy.RoundTripperFor(opts.Config) if err != nil { return err } + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, method, url) fw, err := portforward.New(dialer, opts.Ports, opts.StopChannel, opts.ReadyChannel, f.cmdOut, f.cmdErr) if err != nil { return err @@ -118,7 +120,7 @@ func (o *PortForwardOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, arg var err error o.PodName = cmdutil.GetFlagString(cmd, "pod") if len(o.PodName) == 0 && len(args) == 0 { - return cmdutil.UsageError(cmd, "POD is required for port-forward") + return 
cmdutil.UsageErrorf(cmd, "POD is required for port-forward") } if len(o.PodName) != 0 { diff --git a/pkg/kubectl/cmd/replace.go b/pkg/kubectl/cmd/replace.go index 32183579c7c..d73abe52e7b 100644 --- a/pkg/kubectl/cmd/replace.go +++ b/pkg/kubectl/cmd/replace.go @@ -103,8 +103,8 @@ func RunReplace(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []str } force := cmdutil.GetFlagBool(cmd, "force") - if cmdutil.IsFilenameEmpty(options.Filenames) { - return cmdutil.UsageError(cmd, "Must specify --filename to replace") + if cmdutil.IsFilenameSliceEmpty(options.Filenames) { + return cmdutil.UsageErrorf(cmd, "Must specify --filename to replace") } shortOutput := cmdutil.GetFlagString(cmd, "output") == "name" diff --git a/pkg/kubectl/cmd/rollingupdate.go b/pkg/kubectl/cmd/rollingupdate.go index 274e36190e5..9940c9f6e60 100644 --- a/pkg/kubectl/cmd/rollingupdate.go +++ b/pkg/kubectl/cmd/rollingupdate.go @@ -114,26 +114,26 @@ func validateArguments(cmd *cobra.Command, filenames, args []string) error { errors := []error{} if len(deploymentKey) == 0 { - errors = append(errors, cmdutil.UsageError(cmd, "--deployment-label-key can not be empty")) + errors = append(errors, cmdutil.UsageErrorf(cmd, "--deployment-label-key can not be empty")) } if len(filenames) > 1 { - errors = append(errors, cmdutil.UsageError(cmd, "May only specify a single filename for new controller")) + errors = append(errors, cmdutil.UsageErrorf(cmd, "May only specify a single filename for new controller")) } if !rollback { if len(filenames) == 0 && len(image) == 0 { - errors = append(errors, cmdutil.UsageError(cmd, "Must specify --filename or --image for new controller")) + errors = append(errors, cmdutil.UsageErrorf(cmd, "Must specify --filename or --image for new controller")) } else if len(filenames) != 0 && len(image) != 0 { - errors = append(errors, cmdutil.UsageError(cmd, "--filename and --image can not both be specified")) + errors = append(errors, cmdutil.UsageErrorf(cmd, "--filename and 
--image can not both be specified")) } } else { if len(filenames) != 0 || len(image) != 0 { - errors = append(errors, cmdutil.UsageError(cmd, "Don't specify --filename or --image on rollback")) + errors = append(errors, cmdutil.UsageErrorf(cmd, "Don't specify --filename or --image on rollback")) } } if len(args) < 1 { - errors = append(errors, cmdutil.UsageError(cmd, "Must specify the controller to update")) + errors = append(errors, cmdutil.UsageErrorf(cmd, "Must specify the controller to update")) } return utilerrors.NewAggregate(errors) @@ -213,20 +213,20 @@ func RunRollingUpdate(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args // when creating resource(s) from a stream. if list, ok := obj.(*api.List); ok { if len(list.Items) > 1 { - return cmdutil.UsageError(cmd, "%s specifies multiple items", filename) + return cmdutil.UsageErrorf(cmd, "%s specifies multiple items", filename) } if len(list.Items) == 0 { - return cmdutil.UsageError(cmd, "please make sure %s exists and is not empty", filename) + return cmdutil.UsageErrorf(cmd, "please make sure %s exists and is not empty", filename) } obj = list.Items[0] } newRc, ok = obj.(*api.ReplicationController) if !ok { if gvks, _, err := typer.ObjectKinds(obj); err == nil { - return cmdutil.UsageError(cmd, "%s contains a %v not a ReplicationController", filename, gvks[0]) + return cmdutil.UsageErrorf(cmd, "%s contains a %v not a ReplicationController", filename, gvks[0]) } glog.V(4).Infof("Object %#v is not a ReplicationController", obj) - return cmdutil.UsageError(cmd, "%s does not specify a valid ReplicationController", filename) + return cmdutil.UsageErrorf(cmd, "%s does not specify a valid ReplicationController", filename) } infos, err := request.Infos() if err != nil || len(infos) != 1 { @@ -247,7 +247,7 @@ func RunRollingUpdate(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args } if newRc != nil { if inProgressImage := newRc.Spec.Template.Spec.Containers[0].Image; inProgressImage != image { - 
return cmdutil.UsageError(cmd, "Found existing in-progress update to image (%s).\nEither continue in-progress update with --image=%s or rollback with --rollback", inProgressImage, inProgressImage) + return cmdutil.UsageErrorf(cmd, "Found existing in-progress update to image (%s).\nEither continue in-progress update with --image=%s or rollback with --rollback", inProgressImage, inProgressImage) } fmt.Fprintf(out, "Found existing update in progress (%s), resuming.\n", newRc.Name) } else { @@ -261,7 +261,7 @@ func RunRollingUpdate(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args } if oldRc.Spec.Template.Spec.Containers[0].Image == image { if len(pullPolicy) == 0 { - return cmdutil.UsageError(cmd, "--image-pull-policy (Always|Never|IfNotPresent) must be provided when --image is the same as existing container image") + return cmdutil.UsageErrorf(cmd, "--image-pull-policy (Always|Never|IfNotPresent) must be provided when --image is the same as existing container image") } config.PullPolicy = api.PullPolicy(pullPolicy) } @@ -292,12 +292,12 @@ func RunRollingUpdate(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args } if newRc == nil { - return cmdutil.UsageError(cmd, "Could not find %s to rollback.\n", newName) + return cmdutil.UsageErrorf(cmd, "Could not find %s to rollback.\n", newName) } } if oldName == newRc.Name { - return cmdutil.UsageError(cmd, "%s cannot have the same name as the existing ReplicationController %s", + return cmdutil.UsageErrorf(cmd, "%s cannot have the same name as the existing ReplicationController %s", filename, oldName) } @@ -314,7 +314,7 @@ func RunRollingUpdate(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args } } if !hasLabel { - return cmdutil.UsageError(cmd, "%s must specify a matching key with non-equal value in Selector for %s", + return cmdutil.UsageErrorf(cmd, "%s must specify a matching key with non-equal value in Selector for %s", filename, oldName) } // TODO: handle scales during rolling update diff --git 
a/pkg/kubectl/cmd/rollout/rollout_history.go b/pkg/kubectl/cmd/rollout/rollout_history.go index 20785d4a890..b236176813b 100644 --- a/pkg/kubectl/cmd/rollout/rollout_history.go +++ b/pkg/kubectl/cmd/rollout/rollout_history.go @@ -66,8 +66,8 @@ func NewCmdRolloutHistory(f cmdutil.Factory, out io.Writer) *cobra.Command { } func RunHistory(f cmdutil.Factory, cmd *cobra.Command, out io.Writer, args []string, options *resource.FilenameOptions) error { - if len(args) == 0 && cmdutil.IsFilenameEmpty(options.Filenames) { - return cmdutil.UsageError(cmd, "Required resource not specified.") + if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(options.Filenames) { + return cmdutil.UsageErrorf(cmd, "Required resource not specified.") } revision := cmdutil.GetFlagInt64(cmd, "revision") if revision < 0 { diff --git a/pkg/kubectl/cmd/rollout/rollout_pause.go b/pkg/kubectl/cmd/rollout/rollout_pause.go index b436305f99d..baf6d589360 100644 --- a/pkg/kubectl/cmd/rollout/rollout_pause.go +++ b/pkg/kubectl/cmd/rollout/rollout_pause.go @@ -96,8 +96,8 @@ func NewCmdRolloutPause(f cmdutil.Factory, out io.Writer) *cobra.Command { } func (o *PauseConfig) CompletePause(f cmdutil.Factory, cmd *cobra.Command, out io.Writer, args []string) error { - if len(args) == 0 && cmdutil.IsFilenameEmpty(o.Filenames) { - return cmdutil.UsageError(cmd, cmd.Use) + if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { + return cmdutil.UsageErrorf(cmd, "%s", cmd.Use) } o.Mapper, o.Typer = f.Object() diff --git a/pkg/kubectl/cmd/rollout/rollout_resume.go b/pkg/kubectl/cmd/rollout/rollout_resume.go index e5560082586..cf370f4ca0a 100644 --- a/pkg/kubectl/cmd/rollout/rollout_resume.go +++ b/pkg/kubectl/cmd/rollout/rollout_resume.go @@ -94,8 +94,8 @@ func NewCmdRolloutResume(f cmdutil.Factory, out io.Writer) *cobra.Command { } func (o *ResumeConfig) CompleteResume(f cmdutil.Factory, cmd *cobra.Command, out io.Writer, args []string) error { - if len(args) == 0 && cmdutil.IsFilenameEmpty(o.Filenames) { - 
return cmdutil.UsageError(cmd, cmd.Use) + if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { + return cmdutil.UsageErrorf(cmd, "%s", cmd.Use) } o.Mapper, o.Typer = f.Object() diff --git a/pkg/kubectl/cmd/rollout/rollout_status.go b/pkg/kubectl/cmd/rollout/rollout_status.go index 744d9e6f2b4..548722f45be 100644 --- a/pkg/kubectl/cmd/rollout/rollout_status.go +++ b/pkg/kubectl/cmd/rollout/rollout_status.go @@ -73,8 +73,8 @@ func NewCmdRolloutStatus(f cmdutil.Factory, out io.Writer) *cobra.Command { } func RunStatus(f cmdutil.Factory, cmd *cobra.Command, out io.Writer, args []string, options *resource.FilenameOptions) error { - if len(args) == 0 && cmdutil.IsFilenameEmpty(options.Filenames) { - return cmdutil.UsageError(cmd, "Required resource not specified.") + if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(options.Filenames) { + return cmdutil.UsageErrorf(cmd, "Required resource not specified.") } cmdNamespace, enforceNamespace, err := f.DefaultNamespace() diff --git a/pkg/kubectl/cmd/rollout/rollout_undo.go b/pkg/kubectl/cmd/rollout/rollout_undo.go index 0614d0f0dd3..abbea42e507 100644 --- a/pkg/kubectl/cmd/rollout/rollout_undo.go +++ b/pkg/kubectl/cmd/rollout/rollout_undo.go @@ -96,8 +96,8 @@ func NewCmdRolloutUndo(f cmdutil.Factory, out io.Writer) *cobra.Command { } func (o *UndoOptions) CompleteUndo(f cmdutil.Factory, cmd *cobra.Command, out io.Writer, args []string) error { - if len(args) == 0 && cmdutil.IsFilenameEmpty(o.Filenames) { - return cmdutil.UsageError(cmd, "Required resource not specified.") + if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { + return cmdutil.UsageErrorf(cmd, "Required resource not specified.") } o.ToRevision = cmdutil.GetFlagInt64(cmd, "to-revision") diff --git a/pkg/kubectl/cmd/run.go b/pkg/kubectl/cmd/run.go index af1fe13dac5..8946b0f20cf 100644 --- a/pkg/kubectl/cmd/run.go +++ b/pkg/kubectl/cmd/run.go @@ -116,7 +116,8 @@ func addRunFlags(cmd *cobra.Command) { cmd.Flags().IntP("replicas", "r", 1, 
"Number of replicas to create for this container. Default is 1.") cmd.Flags().Bool("rm", false, "If true, delete resources created in this command for attached containers.") cmd.Flags().String("overrides", "", i18n.T("An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field.")) - cmd.Flags().StringSlice("env", []string{}, "Environment variables to set in the container") + cmd.Flags().StringArray("env", []string{}, "Environment variables to set in the container") + cmd.Flags().String("serviceaccount", "", "Service account to set in the pod spec") cmd.Flags().String("port", "", i18n.T("The port that this container exposes. If --expose is true, this is also the port used by the service that is created.")) cmd.Flags().Int("hostport", -1, "The host port mapping for the container port. To demonstrate a single-machine container.") cmd.Flags().StringP("labels", "l", "", "Labels to apply to the pod(s).") @@ -138,12 +139,12 @@ func addRunFlags(cmd *cobra.Command) { func RunRun(f cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *cobra.Command, args []string, argsLenAtDash int) error { // Let kubectl run follow rules for `--`, see #13004 issue if len(args) == 0 || argsLenAtDash == 0 { - return cmdutil.UsageError(cmd, "NAME is required for run") + return cmdutil.UsageErrorf(cmd, "NAME is required for run") } timeout, err := cmdutil.GetPodRunningTimeoutFlag(cmd) if err != nil { - return cmdutil.UsageError(cmd, err.Error()) + return cmdutil.UsageErrorf(cmd, "%v", err) } // validate image name @@ -156,11 +157,11 @@ func RunRun(f cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *c interactive := cmdutil.GetFlagBool(cmd, "stdin") tty := cmdutil.GetFlagBool(cmd, "tty") if tty && !interactive { - return cmdutil.UsageError(cmd, "-i/--stdin is required for containers with -t/--tty=true") + return cmdutil.UsageErrorf(cmd, "-i/--stdin is 
required for containers with -t/--tty=true") } replicas := cmdutil.GetFlagInt(cmd, "replicas") if interactive && replicas != 1 { - return cmdutil.UsageError(cmd, fmt.Sprintf("-i/--stdin requires that replicas is 1, found %d", replicas)) + return cmdutil.UsageErrorf(cmd, "-i/--stdin requires that replicas is 1, found %d", replicas) } namespace, _, err := f.DefaultNamespace() @@ -172,7 +173,7 @@ func RunRun(f cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *c return err } if restartPolicy != api.RestartPolicyAlways && replicas != 1 { - return cmdutil.UsageError(cmd, fmt.Sprintf("--restart=%s requires that --replicas=1, found %d", restartPolicy, replicas)) + return cmdutil.UsageErrorf(cmd, "--restart=%s requires that --replicas=1, found %d", restartPolicy, replicas) } attachFlag := cmd.Flags().Lookup("attach") @@ -184,11 +185,11 @@ func RunRun(f cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *c remove := cmdutil.GetFlagBool(cmd, "rm") if !attach && remove { - return cmdutil.UsageError(cmd, "--rm should only be used for attached containers") + return cmdutil.UsageErrorf(cmd, "--rm should only be used for attached containers") } if attach && cmdutil.GetDryRunFlag(cmd) { - return cmdutil.UsageError(cmd, "--dry-run can't be used with attached containers options (--attach, --stdin, or --tty)") + return cmdutil.UsageErrorf(cmd, "--dry-run can't be used with attached containers options (--attach, --stdin, or --tty)") } if err := verifyImagePullPolicy(cmd); err != nil { @@ -242,7 +243,7 @@ func RunRun(f cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *c generators := f.Generators("run") generator, found := generators[generatorName] if !found { - return cmdutil.UsageError(cmd, fmt.Sprintf("generator %q not found.", generatorName)) + return cmdutil.UsageErrorf(cmd, "generator %q not found", generatorName) } names := generator.ParamNames() params := kubectl.MakeParams(cmd, names) @@ -251,7 +252,7 @@ func RunRun(f cmdutil.Factory, 
cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *c params["args"] = args[1:] } - params["env"] = cmdutil.GetFlagStringSlice(cmd, "env") + params["env"] = cmdutil.GetFlagStringArray(cmd, "env") obj, _, mapper, mapping, err := createGeneratedObject(f, cmd, generator, names, params, cmdutil.GetFlagString(cmd, "overrides"), namespace) if err != nil { @@ -261,7 +262,7 @@ func RunRun(f cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *c if cmdutil.GetFlagBool(cmd, "expose") { serviceGenerator := cmdutil.GetFlagString(cmd, "service-generator") if len(serviceGenerator) == 0 { - return cmdutil.UsageError(cmd, fmt.Sprintf("No service generator specified")) + return cmdutil.UsageErrorf(cmd, "No service generator specified") } if err := generateService(f, cmd, args, serviceGenerator, params, namespace, cmdOut); err != nil { return err @@ -362,12 +363,13 @@ func RunRun(f cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *c return unknownRcErr } return uexec.CodeExitError{ - Err: fmt.Errorf("pod %s/%s terminated", pod.Namespace, pod.Name), + Err: fmt.Errorf("pod %s/%s terminated (%s)\n%s", pod.Namespace, pod.Name, pod.Status.ContainerStatuses[0].State.Terminated.Reason, pod.Status.ContainerStatuses[0].State.Terminated.Message), Code: int(rc), } default: return fmt.Errorf("pod %s/%s left in phase %s", pod.Namespace, pod.Name, pod.Status.Phase) } + } outputFormat := cmdutil.GetFlagString(cmd, "output") @@ -491,9 +493,8 @@ func getRestartPolicy(cmd *cobra.Command, interactive bool) (api.RestartPolicy, return api.RestartPolicyOnFailure, nil case api.RestartPolicyNever: return api.RestartPolicyNever, nil - default: - return "", cmdutil.UsageError(cmd, fmt.Sprintf("invalid restart policy: %s", restart)) } + return "", cmdutil.UsageErrorf(cmd, "invalid restart policy: %s") } func verifyImagePullPolicy(cmd *cobra.Command) error { @@ -503,9 +504,8 @@ func verifyImagePullPolicy(cmd *cobra.Command) error { return nil case "": return nil - default: - return 
cmdutil.UsageError(cmd, fmt.Sprintf("invalid image pull policy: %s", pullPolicy)) } + return cmdutil.UsageErrorf(cmd, "invalid image pull policy: %s", pullPolicy) } func generateService(f cmdutil.Factory, cmd *cobra.Command, args []string, serviceGenerator string, paramsIn map[string]interface{}, namespace string, out io.Writer) error { diff --git a/pkg/kubectl/cmd/run_test.go b/pkg/kubectl/cmd/run_test.go index ed3b7860a9e..516f0e00b46 100644 --- a/pkg/kubectl/cmd/run_test.go +++ b/pkg/kubectl/cmd/run_test.go @@ -168,12 +168,11 @@ func TestRunArgsFollowDashRules(t *testing.T) { Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { if req.URL.Path == "/namespaces/test/replicationcontrollers" { return &http.Response{StatusCode: 201, Header: defaultHeader(), Body: objBody(codec, rc)}, nil - } else { - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewBuffer([]byte("{}"))), - }, nil } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewBuffer([]byte("{}"))), + }, nil }), } tf.Namespace = "test" @@ -297,14 +296,12 @@ func TestGenerateService(t *testing.T) { body := objBody(codec, &test.service) data, err := ioutil.ReadAll(req.Body) if err != nil { - t.Errorf("unexpected error: %v", err) - t.FailNow() + t.Fatalf("unexpected error: %v", err) } defer req.Body.Close() svc := &api.Service{} if err := runtime.DecodeInto(codec, data, svc); err != nil { - t.Errorf("unexpected error: %v", err) - t.FailNow() + t.Fatalf("unexpected error: %v", err) } // Copy things that are defaulted by the system test.service.Annotations = svc.Annotations diff --git a/pkg/kubectl/cmd/scale.go b/pkg/kubectl/cmd/scale.go index 722c17f8f1b..c8579cdcf4e 100644 --- a/pkg/kubectl/cmd/scale.go +++ b/pkg/kubectl/cmd/scale.go @@ -108,7 +108,7 @@ func RunScale(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []strin Do() err = r.Err() if resource.IsUsageError(err) { - return cmdutil.UsageError(cmd, 
err.Error()) + return cmdutil.UsageErrorf(cmd, "%v", err) } if err != nil { return err @@ -116,7 +116,7 @@ func RunScale(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []strin count := cmdutil.GetFlagInt(cmd, "replicas") if count < 0 { - return cmdutil.UsageError(cmd, "The --replicas=COUNT flag is required, and COUNT must be greater than or equal to 0") + return cmdutil.UsageErrorf(cmd, "The --replicas=COUNT flag is required, and COUNT must be greater than or equal to 0") } infos := []*resource.Info{} diff --git a/pkg/kubectl/cmd/set/set_image.go b/pkg/kubectl/cmd/set/set_image.go index 74daefc3449..b6eaf019d57 100644 --- a/pkg/kubectl/cmd/set/set_image.go +++ b/pkg/kubectl/cmd/set/set_image.go @@ -158,7 +158,7 @@ func (o *ImageOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st func (o *ImageOptions) Validate() error { errors := []error{} - if len(o.Resources) < 1 && cmdutil.IsFilenameEmpty(o.Filenames) { + if len(o.Resources) < 1 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { errors = append(errors, fmt.Errorf("one or more resources must be specified as or /")) } if len(o.ContainerImages) < 1 { diff --git a/pkg/kubectl/cmd/set/set_selector.go b/pkg/kubectl/cmd/set/set_selector.go index 3cbb8597077..5e055c9e178 100644 --- a/pkg/kubectl/cmd/set/set_selector.go +++ b/pkg/kubectl/cmd/set/set_selector.go @@ -116,6 +116,10 @@ func (o *SelectorOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ mapper, _ := f.Object() o.mapper = mapper o.encoder = f.JSONEncoder() + o.resources, o.selector, err = getResourcesAndSelector(args) + if err != nil { + return err + } o.builder = f.NewBuilder(!o.local). ContinueOnError(). 
@@ -135,14 +139,12 @@ func (o *SelectorOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ o.ClientForMapping = func(mapping *meta.RESTMapping) (resource.RESTClient, error) { return f.ClientForMapping(mapping) } - - o.resources, o.selector, err = getResourcesAndSelector(args) return err } // Validate basic inputs func (o *SelectorOptions) Validate() error { - if len(o.resources) < 1 && cmdutil.IsFilenameEmpty(o.fileOptions.Filenames) { + if len(o.resources) < 1 && cmdutil.IsFilenameSliceEmpty(o.fileOptions.Filenames) { return fmt.Errorf("one or more resources must be specified as or /") } if o.selector == nil { diff --git a/pkg/kubectl/cmd/taint.go b/pkg/kubectl/cmd/taint.go index 2b2cb42933b..ca9c0547715 100644 --- a/pkg/kubectl/cmd/taint.go +++ b/pkg/kubectl/cmd/taint.go @@ -102,7 +102,7 @@ func NewCmdTaint(f cmdutil.Factory, out io.Writer) *cobra.Command { cmdutil.CheckErr(err) } if err := options.Validate(); err != nil { - cmdutil.CheckErr(cmdutil.UsageError(cmd, err.Error())) + cmdutil.CheckErr(cmdutil.UsageErrorf(cmd, err.Error())) } if err := options.RunTaint(); err != nil { cmdutil.CheckErr(err) @@ -244,7 +244,7 @@ func (o *TaintOptions) Complete(f cmdutil.Factory, out io.Writer, cmd *cobra.Com } if o.taintsToAdd, o.taintsToRemove, err = parseTaints(taintArgs); err != nil { - return cmdutil.UsageError(cmd, err.Error()) + return cmdutil.UsageErrorf(cmd, err.Error()) } o.builder = f.NewBuilder(true). ContinueOnError(). 
diff --git a/pkg/kubectl/cmd/templates/BUILD b/pkg/kubectl/cmd/templates/BUILD index 4ee1ce4ab0f..7255c533561 100644 --- a/pkg/kubectl/cmd/templates/BUILD +++ b/pkg/kubectl/cmd/templates/BUILD @@ -19,7 +19,7 @@ go_library( "//build/visible_to:pkg_kubectl_cmd_templates_CONSUMERS", ], deps = [ - "//pkg/util/term:go_default_library", + "//pkg/kubectl/util/term:go_default_library", "//vendor/github.com/MakeNowJust/heredoc:go_default_library", "//vendor/github.com/russross/blackfriday:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", diff --git a/pkg/kubectl/cmd/templates/templater.go b/pkg/kubectl/cmd/templates/templater.go index 30c128ef3b2..27b76dc42b7 100644 --- a/pkg/kubectl/cmd/templates/templater.go +++ b/pkg/kubectl/cmd/templates/templater.go @@ -23,7 +23,7 @@ import ( "text/template" "unicode" - "k8s.io/kubernetes/pkg/util/term" + "k8s.io/kubernetes/pkg/kubectl/util/term" "github.com/spf13/cobra" flag "github.com/spf13/pflag" diff --git a/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/0.request b/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/0.request new file mode 100755 index 00000000000..e69de29bb2d diff --git a/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/0.response b/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/0.response new file mode 100755 index 00000000000..9d4d9c7750a --- /dev/null +++ b/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/0.response @@ -0,0 +1,38 @@ +{ + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "annotations": { + "kubectl.kubernetes.io/last-applied-configuration": "{\"kind\":\"Service\",\"apiVersion\":\"v1\",\"metadata\":{\"name\":\"svc1\",\"creationTimestamp\":null,\"labels\":{\"app\":\"svc1\"}},\"spec\":{\"ports\":[{\"name\":\"80\",\"protocol\":\"TCP\",\"port\":80,\"targetPort\":80}],\"selector\":{\"app\":\"svc1\"},\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}\n" + }, + "creationTimestamp": "2017-02-27T19:40:53Z", + "labels": { + 
"app": "svc1" + }, + "name": "svc1", + "namespace": "edit-test", + "resourceVersion": "670", + "selfLink": "/api/v1/namespaces/edit-test/services/svc1", + "uid": "a6c11186-fd24-11e6-b53c-480fcf4a5275" + }, + "spec": { + "clusterIP": "10.0.0.204", + "ports": [ + { + "name": "80", + "port": 80, + "protocol": "TCP", + "targetPort": 80 + } + ], + "selector": { + "app": "svc1" + }, + "sessionAffinity": "None", + "type": "ClusterIP" + }, + "status": { + "loadBalancer": {} + } +} + diff --git a/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/1.edited b/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/1.edited new file mode 100755 index 00000000000..cce0483ddab --- /dev/null +++ b/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/1.edited @@ -0,0 +1,32 @@ +# Please edit the object below. Lines beginning with a '#' will be ignored, +# and an empty file will abort the edit. If an error occurs while saving this file will be +# reopened with the relevant failures. +# +apiVersion: v1 +kind: Service +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"kind":"Service","apiVersion":"v1","metadata":{"name":"svc1","creationTimestamp":null,"labels":{"app":"svc1"}},"spec":{"ports":[{"name":"80","protocol":"TCP","port":80,"targetPort":80}],"selector":{"app":"svc1"},"type":"ClusterIP"},"status":{"loadBalancer":{}}} + creationTimestamp: 2017-02-27T19:40:53Z + labels: + app: svc1 + new-label: new-value + name: svc1 + namespace: edit-test + resourceVersion: "670" + selfLink: /api/v1/namespaces/edit-test/services/svc1 + uid: a6c11186-fd24-11e6-b53c-480fcf4a5275 +spec: + clusterIP: 10.0.0.204 + ports: + - name: "80" + port: 80 + protocol: TCP + targetPort: 80 + selector: + app: svc1 + sessionAffinity: None + type: ClusterIP +status: + loadBalancer: {} diff --git a/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/1.original b/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/1.original new file mode 100755 index 
00000000000..1748e03ca75 --- /dev/null +++ b/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/1.original @@ -0,0 +1,31 @@ +# Please edit the object below. Lines beginning with a '#' will be ignored, +# and an empty file will abort the edit. If an error occurs while saving this file will be +# reopened with the relevant failures. +# +apiVersion: v1 +kind: Service +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"kind":"Service","apiVersion":"v1","metadata":{"name":"svc1","creationTimestamp":null,"labels":{"app":"svc1"}},"spec":{"ports":[{"name":"80","protocol":"TCP","port":80,"targetPort":80}],"selector":{"app":"svc1"},"type":"ClusterIP"},"status":{"loadBalancer":{}}} + creationTimestamp: 2017-02-27T19:40:53Z + labels: + app: svc1 + name: svc1 + namespace: edit-test + resourceVersion: "670" + selfLink: /api/v1/namespaces/edit-test/services/svc1 + uid: a6c11186-fd24-11e6-b53c-480fcf4a5275 +spec: + clusterIP: 10.0.0.204 + ports: + - name: "80" + port: 80 + protocol: TCP + targetPort: 80 + selector: + app: svc1 + sessionAffinity: None + type: ClusterIP +status: + loadBalancer: {} diff --git a/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/2.request b/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/2.request new file mode 100755 index 00000000000..b26622634f6 --- /dev/null +++ b/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/2.request @@ -0,0 +1,10 @@ +{ + "metadata": { + "annotations": { + "kubectl.kubernetes.io/last-applied-configuration": 
"{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"creationTimestamp\":\"2017-02-27T19:40:53Z\",\"labels\":{\"app\":\"svc1\",\"new-label\":\"new-value\"},\"name\":\"svc1\",\"namespace\":\"edit-test\",\"resourceVersion\":\"670\",\"selfLink\":\"/api/v1/namespaces/edit-test/services/svc1\",\"uid\":\"a6c11186-fd24-11e6-b53c-480fcf4a5275\"},\"spec\":{\"clusterIP\":\"10.0.0.204\",\"ports\":[{\"name\":\"80\",\"port\":80,\"protocol\":\"TCP\",\"targetPort\":80}],\"selector\":{\"app\":\"svc1\"},\"sessionAffinity\":\"None\",\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}\n" + }, + "labels": { + "new-label": "new-value" + } + } +} \ No newline at end of file diff --git a/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/2.response b/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/2.response new file mode 100755 index 00000000000..cf658cb8d1c --- /dev/null +++ b/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/2.response @@ -0,0 +1,38 @@ +{ + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "annotations": { + "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"creationTimestamp\":\"2017-02-27T19:40:53Z\",\"labels\":{\"app\":\"svc1\",\"new-label\":\"new-value\"},\"name\":\"svc1\",\"namespace\":\"edit-test\",\"resourceVersion\":\"670\",\"selfLink\":\"/api/v1/namespaces/edit-test/services/svc1\",\"uid\":\"a6c11186-fd24-11e6-b53c-480fcf4a5275\"},\"spec\":{\"clusterIP\":\"10.0.0.204\",\"ports\":[{\"name\":\"80\",\"port\":80,\"protocol\":\"TCP\",\"targetPort\":80}],\"selector\":{\"app\":\"svc1\"},\"sessionAffinity\":\"None\",\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}\n" + }, + "name": "svc1", + "namespace": "edit-test", + "selfLink": "/api/v1/namespaces/edit-test/services/svc1", + "uid": "a6c11186-fd24-11e6-b53c-480fcf4a5275", + "resourceVersion":"1045", + "creationTimestamp":"2017-02-27T19:40:53Z", + "labels": { + 
"app": "svc1", + "new-label": "new-value" + } + }, + "spec": { + "clusterIP": "10.0.0.204", + "ports": [ + { + "name": "80", + "port": 80, + "protocol": "TCP", + "targetPort": 80 + } + ], + "selector": { + "app": "svc1" + }, + "sessionAffinity": "None", + "type": "ClusterIP" + }, + "status": { + "loadBalancer": {} + } +} diff --git a/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/test.yaml b/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/test.yaml new file mode 100644 index 00000000000..0b0f92f9754 --- /dev/null +++ b/pkg/kubectl/cmd/testdata/edit/testcase-edit-output-patch/test.yaml @@ -0,0 +1,32 @@ +# kubectl create namespace edit-test +# kubectl create service clusterip svc1 --tcp 80 --namespace=edit-test --save-config +# kubectl edit service svc1 --namespace=edit-test --save-config=true --output-patch=true +description: edit with flag --output-patch=true should output the patch +mode: edit +args: +- service +- svc1 +saveConfig: "true" +outputPatch: "true" +namespace: edit-test +expectedStdout: +- 'Patch: {"metadata":{"annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"creationTimestamp\":\"2017-02-27T19:40:53Z\",\"labels\":{\"app\":\"svc1\",\"new-label\":\"new-value\"},\"name\":\"svc1\",\"namespace\":\"edit-test\",\"resourceVersion\":\"670\",\"selfLink\":\"/api/v1/namespaces/edit-test/services/svc1\",\"uid\":\"a6c11186-fd24-11e6-b53c-480fcf4a5275\"},\"spec\":{\"clusterIP\":\"10.0.0.204\",\"ports\":[{\"name\":\"80\",\"port\":80,\"protocol\":\"TCP\",\"targetPort\":80}],\"selector\":{\"app\":\"svc1\"},\"sessionAffinity\":\"None\",\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}\n"},"labels":{"new-label":"new-value"}}}' +- service "svc1" edited +expectedExitCode: 0 +steps: +- type: request + expectedMethod: GET + expectedPath: /api/v1/namespaces/edit-test/services/svc1 + expectedInput: 0.request + resultingStatusCode: 200 + resultingOutput: 
0.response +- type: edit + expectedInput: 1.original + resultingOutput: 1.edited +- type: request + expectedMethod: PATCH + expectedPath: /api/v1/namespaces/edit-test/services/svc1 + expectedContentType: application/strategic-merge-patch+json + expectedInput: 2.request + resultingStatusCode: 200 + resultingOutput: 2.response diff --git a/pkg/kubectl/cmd/top_node.go b/pkg/kubectl/cmd/top_node.go index 1ddabbc96b3..e432a457622 100644 --- a/pkg/kubectl/cmd/top_node.go +++ b/pkg/kubectl/cmd/top_node.go @@ -83,7 +83,7 @@ func NewCmdTopNode(f cmdutil.Factory, out io.Writer) *cobra.Command { cmdutil.CheckErr(err) } if err := options.Validate(); err != nil { - cmdutil.CheckErr(cmdutil.UsageError(cmd, err.Error())) + cmdutil.CheckErr(cmdutil.UsageErrorf(cmd, "%v", err)) } if err := options.RunTopNode(); err != nil { cmdutil.CheckErr(err) @@ -101,7 +101,7 @@ func (o *TopNodeOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [] if len(args) == 1 { o.ResourceName = args[0] } else if len(args) > 1 { - return cmdutil.UsageError(cmd, cmd.Use) + return cmdutil.UsageErrorf(cmd, "%s", cmd.Use) } clientset, err := f.ClientSet() diff --git a/pkg/kubectl/cmd/top_pod.go b/pkg/kubectl/cmd/top_pod.go index 79479dcbf00..689e6e3e2fe 100644 --- a/pkg/kubectl/cmd/top_pod.go +++ b/pkg/kubectl/cmd/top_pod.go @@ -85,7 +85,7 @@ func NewCmdTopPod(f cmdutil.Factory, out io.Writer) *cobra.Command { cmdutil.CheckErr(err) } if err := options.Validate(); err != nil { - cmdutil.CheckErr(cmdutil.UsageError(cmd, err.Error())) + cmdutil.CheckErr(cmdutil.UsageErrorf(cmd, "%v", err)) } if err := options.RunTopPod(); err != nil { cmdutil.CheckErr(err) @@ -105,7 +105,7 @@ func (o *TopPodOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []s if len(args) == 1 { o.ResourceName = args[0] } else if len(args) > 1 { - return cmdutil.UsageError(cmd, cmd.Use) + return cmdutil.UsageErrorf(cmd, "%s", cmd.Use) } o.Namespace, _, err = f.DefaultNamespace() diff --git a/pkg/kubectl/cmd/util/BUILD 
b/pkg/kubectl/cmd/util/BUILD index d17bbce08ea..722e7e395a4 100644 --- a/pkg/kubectl/cmd/util/BUILD +++ b/pkg/kubectl/cmd/util/BUILD @@ -46,8 +46,8 @@ go_library( "//pkg/version:go_default_library", "//vendor/github.com/emicklei/go-restful-swagger12:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", - "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -108,7 +108,7 @@ go_test( "//pkg/kubectl/resource:go_default_library", "//pkg/util/exec:go_default_library", "//vendor/github.com/emicklei/go-restful-swagger12:go_default_library", - "//vendor/github.com/go-openapi/spec:go_default_library", + "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", diff --git a/pkg/kubectl/cmd/util/cached_discovery.go b/pkg/kubectl/cmd/util/cached_discovery.go index 5b13e22f525..e48e479eaf0 100644 --- a/pkg/kubectl/cmd/util/cached_discovery.go +++ b/pkg/kubectl/cmd/util/cached_discovery.go @@ -25,8 +25,8 @@ import ( "time" "github.com/emicklei/go-restful-swagger12" - "github.com/go-openapi/spec" "github.com/golang/glog" + "github.com/googleapis/gnostic/OpenAPIv2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -237,7 +237,7 @@ func (d *CachedDiscoveryClient) SwaggerSchema(version schema.GroupVersion) (*swa return d.delegate.SwaggerSchema(version) } -func (d *CachedDiscoveryClient) OpenAPISchema() (*spec.Swagger, error) { +func (d *CachedDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { return d.delegate.OpenAPISchema() } 
diff --git a/pkg/kubectl/cmd/util/cached_discovery_test.go b/pkg/kubectl/cmd/util/cached_discovery_test.go index e11e9b48015..ac8799cf5da 100644 --- a/pkg/kubectl/cmd/util/cached_discovery_test.go +++ b/pkg/kubectl/cmd/util/cached_discovery_test.go @@ -23,7 +23,7 @@ import ( "time" "github.com/emicklei/go-restful-swagger12" - "github.com/go-openapi/spec" + "github.com/googleapis/gnostic/OpenAPIv2" "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/api/errors" @@ -171,7 +171,7 @@ func (c *fakeDiscoveryClient) SwaggerSchema(version schema.GroupVersion) (*swagg return &swagger.ApiDeclaration{}, nil } -func (c *fakeDiscoveryClient) OpenAPISchema() (*spec.Swagger, error) { +func (c *fakeDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { c.openAPICalls = c.openAPICalls + 1 - return &spec.Swagger{}, nil + return &openapi_v2.Document{}, nil } diff --git a/pkg/kubectl/cmd/util/editor/BUILD b/pkg/kubectl/cmd/util/editor/BUILD index 68e18edf45a..328f1f3e55a 100644 --- a/pkg/kubectl/cmd/util/editor/BUILD +++ b/pkg/kubectl/cmd/util/editor/BUILD @@ -21,9 +21,9 @@ go_library( "//pkg/kubectl:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", "//pkg/kubectl/resource:go_default_library", + "//pkg/kubectl/util/crlf:go_default_library", + "//pkg/kubectl/util/term:go_default_library", "//pkg/printers:go_default_library", - "//pkg/util/crlf:go_default_library", - "//pkg/util/term:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/pkg/kubectl/cmd/util/editor/editoptions.go b/pkg/kubectl/cmd/util/editor/editoptions.go index df4b88b631b..3b3066201bd 100644 --- a/pkg/kubectl/cmd/util/editor/editoptions.go +++ b/pkg/kubectl/cmd/util/editor/editoptions.go @@ -44,8 +44,8 @@ import ( "k8s.io/kubernetes/pkg/kubectl" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" 
"k8s.io/kubernetes/pkg/kubectl/resource" + "k8s.io/kubernetes/pkg/kubectl/util/crlf" "k8s.io/kubernetes/pkg/printers" - "k8s.io/kubernetes/pkg/util/crlf" ) // EditOptions contains all the options for running edit cli command. @@ -53,6 +53,7 @@ type EditOptions struct { resource.FilenameOptions Output string + OutputPatch bool WindowsLineEndings bool cmdutil.ValidateOptions @@ -96,6 +97,10 @@ func (o *EditOptions) Complete(f cmdutil.Factory, out, errOut io.Writer, args [] } o.editPrinterOptions = getPrinter(o.Output) + if o.OutputPatch && o.EditMode != NormalEditMode { + return fmt.Errorf("the edit mode doesn't support output the patch") + } + cmdNamespace, enforceNamespace, err := f.DefaultNamespace() if err != nil { return err @@ -577,6 +582,10 @@ func (o *EditOptions) visitToPatch( } } + if o.OutputPatch { + fmt.Fprintf(o.Out, "Patch: %s\n", string(patch)) + } + patched, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, patchType, patch) if err != nil { fmt.Fprintln(o.ErrOut, results.addError(err, info)) diff --git a/pkg/kubectl/cmd/util/editor/editor.go b/pkg/kubectl/cmd/util/editor/editor.go index 9f53ee7c758..a53e8c0970a 100644 --- a/pkg/kubectl/cmd/util/editor/editor.go +++ b/pkg/kubectl/cmd/util/editor/editor.go @@ -29,7 +29,7 @@ import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/util/term" + "k8s.io/kubernetes/pkg/kubectl/util/term" ) const ( diff --git a/pkg/kubectl/cmd/util/helpers.go b/pkg/kubectl/cmd/util/helpers.go index 1e9fc4478b7..bf5f57ce5ab 100644 --- a/pkg/kubectl/cmd/util/helpers.go +++ b/pkg/kubectl/cmd/util/helpers.go @@ -45,7 +45,6 @@ import ( "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/client-go/tools/clientcmd" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/resource" "k8s.io/kubernetes/pkg/printers" @@ -117,31 +116,32 @@ var ErrExit = fmt.Errorf("exit") // This method is generic to the command in use and may be 
used by non-Kubectl // commands. func CheckErr(err error) { - checkErr("", err, fatalErrHandler) + checkErr(err, fatalErrHandler) } // checkErrWithPrefix works like CheckErr, but adds a caller-defined prefix to non-nil errors func checkErrWithPrefix(prefix string, err error) { - checkErr(prefix, err, fatalErrHandler) + checkErr(err, fatalErrHandler) } // checkErr formats a given error as a string and calls the passed handleErr // func with that string and an kubectl exit code. -func checkErr(prefix string, err error, handleErr func(string, int)) { +func checkErr(err error, handleErr func(string, int)) { // unwrap aggregates of 1 if agg, ok := err.(utilerrors.Aggregate); ok && len(agg.Errors()) == 1 { err = agg.Errors()[0] } - switch { - case err == nil: + if err == nil { return + } + + switch { case err == ErrExit: handleErr("", DefaultErrorExitCode) - return case kerrors.IsInvalid(err): details := err.(*kerrors.StatusError).Status().Details - s := fmt.Sprintf("%sThe %s %q is invalid", prefix, details.Kind, details.Name) + s := fmt.Sprintf("The %s %q is invalid", details.Kind, details.Name) if len(details.Causes) > 0 { errs := statusCausesToAggrError(details.Causes) handleErr(MultilineError(s+": ", errs), DefaultErrorExitCode) @@ -149,25 +149,24 @@ func checkErr(prefix string, err error, handleErr func(string, int)) { handleErr(s, DefaultErrorExitCode) } case clientcmd.IsConfigurationInvalid(err): - handleErr(MultilineError(fmt.Sprintf("%sError in configuration: ", prefix), err), DefaultErrorExitCode) + handleErr(MultilineError("Error in configuration: ", err), DefaultErrorExitCode) default: switch err := err.(type) { case *meta.NoResourceMatchError: switch { case len(err.PartialResource.Group) > 0 && len(err.PartialResource.Version) > 0: - handleErr(fmt.Sprintf("%sthe server doesn't have a resource type %q in group %q and version %q", prefix, err.PartialResource.Resource, err.PartialResource.Group, err.PartialResource.Version), DefaultErrorExitCode) + 
handleErr(fmt.Sprintf("the server doesn't have a resource type %q in group %q and version %q", err.PartialResource.Resource, err.PartialResource.Group, err.PartialResource.Version), DefaultErrorExitCode) case len(err.PartialResource.Group) > 0: - handleErr(fmt.Sprintf("%sthe server doesn't have a resource type %q in group %q", prefix, err.PartialResource.Resource, err.PartialResource.Group), DefaultErrorExitCode) + handleErr(fmt.Sprintf("the server doesn't have a resource type %q in group %q", err.PartialResource.Resource, err.PartialResource.Group), DefaultErrorExitCode) case len(err.PartialResource.Version) > 0: - handleErr(fmt.Sprintf("%sthe server doesn't have a resource type %q in version %q", prefix, err.PartialResource.Resource, err.PartialResource.Version), DefaultErrorExitCode) + handleErr(fmt.Sprintf("the server doesn't have a resource type %q in version %q", err.PartialResource.Resource, err.PartialResource.Version), DefaultErrorExitCode) default: - handleErr(fmt.Sprintf("%sthe server doesn't have a resource type %q", prefix, err.PartialResource.Resource), DefaultErrorExitCode) + handleErr(fmt.Sprintf("the server doesn't have a resource type %q", err.PartialResource.Resource), DefaultErrorExitCode) } case utilerrors.Aggregate: - handleErr(MultipleErrors(prefix, err.Errors()), DefaultErrorExitCode) + handleErr(MultipleErrors(``, err.Errors()), DefaultErrorExitCode) case utilexec.ExitError: - // do not print anything, only terminate with given error - handleErr("", err.ExitStatus()) + handleErr(err.Error(), err.ExitStatus()) default: // for any other error type msg, ok := StandardErrorMessage(err) if !ok { @@ -297,26 +296,23 @@ func messageForError(err error) string { return msg } -func UsageError(cmd *cobra.Command, format string, args ...interface{}) error { +func UsageErrorf(cmd *cobra.Command, format string, args ...interface{}) error { msg := fmt.Sprintf(format, args...) 
return fmt.Errorf("%s\nSee '%s -h' for help and examples.", msg, cmd.CommandPath()) } -func IsFilenameEmpty(filenames []string) bool { +func IsFilenameSliceEmpty(filenames []string) bool { return len(filenames) == 0 } // Whether this cmd need watching objects. func isWatch(cmd *cobra.Command) bool { - if w, err := cmd.Flags().GetBool("watch"); w && err == nil { + if w, err := cmd.Flags().GetBool("watch"); err == nil && w { return true } - if wo, err := cmd.Flags().GetBool("watch-only"); wo && err == nil { - return true - } - - return false + wo, err := cmd.Flags().GetBool("watch-only") + return err == nil && wo } func GetFlagString(cmd *cobra.Command, flag string) string { @@ -533,7 +529,6 @@ func UpdateObject(info *resource.Info, codec runtime.Codec, updateFn func(runtim return info.Object, nil } -// AddCmdRecordFlag adds --record flag to command func AddRecordFlag(cmd *cobra.Command) { cmd.Flags().Bool("record", false, "Record current kubectl command in the resource annotation. If set to false, do not record the command. If set to true, record the command. If not set, default to updating the existing annotation value only if one already exists.") } @@ -679,18 +674,6 @@ func ParsePairs(pairArgs []string, pairType string, supportRemove bool) (newPair return } -// MaybeConvertObject attempts to convert an object to a specific group/version. If the object is -// a third party resource it is simply passed through. 
-func MaybeConvertObject(obj runtime.Object, gv schema.GroupVersion, converter runtime.ObjectConvertor) (runtime.Object, error) { - switch obj.(type) { - case *extensions.ThirdPartyResourceData: - // conversion is not supported for 3rd party objects - return obj, nil - default: - return converter.ConvertToVersion(obj, gv) - } -} - // MustPrintWithKinds determines if printer is dealing // with multiple resource kinds, in which case it will // return true, indicating resource kind will be @@ -802,7 +785,7 @@ func DefaultSubCommandRun(out io.Writer) func(c *cobra.Command, args []string) { // RequireNoArguments exits with a usage error if extra arguments are provided. func RequireNoArguments(c *cobra.Command, args []string) { if len(args) > 0 { - CheckErr(UsageError(c, fmt.Sprintf(`unknown command %q`, strings.Join(args, " ")))) + CheckErr(UsageErrorf(c, "unknown command %q", strings.Join(args, " "))) } } diff --git a/pkg/kubectl/cmd/util/helpers_test.go b/pkg/kubectl/cmd/util/helpers_test.go index ff34381787e..eb64a960213 100644 --- a/pkg/kubectl/cmd/util/helpers_test.go +++ b/pkg/kubectl/cmd/util/helpers_test.go @@ -25,7 +25,6 @@ import ( "syscall" "testing" - "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -36,7 +35,6 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" apitesting "k8s.io/kubernetes/pkg/api/testing" - "k8s.io/kubernetes/pkg/apis/extensions" uexec "k8s.io/kubernetes/pkg/util/exec" ) @@ -269,7 +267,7 @@ func TestCheckExitError(t *testing.T) { testCheckError(t, []checkErrTestCase{ { uexec.CodeExitError{Err: fmt.Errorf("pod foo/bar terminated"), Code: 42}, - "", + "pod foo/bar terminated", 42, }, }) @@ -284,7 +282,7 @@ func testCheckError(t *testing.T, tests []checkErrTestCase) { } for _, test := range tests { - checkErr("", test.err, errHandle) + checkErr(test.err, errHandle) if errReturned != test.expectedErr { t.Fatalf("Got: %s, 
expected: %s", errReturned, test.expectedErr) @@ -321,53 +319,3 @@ func TestDumpReaderToFile(t *testing.T) { t.Fatalf("Wrong file content %s != %s", testString, stringData) } } - -func TestMaybeConvert(t *testing.T) { - tests := []struct { - input runtime.Object - gv schema.GroupVersion - expected runtime.Object - }{ - { - input: &api.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - }, - gv: schema.GroupVersion{Group: "", Version: "v1"}, - expected: &v1.Pod{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Pod", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - }, - }, - { - input: &extensions.ThirdPartyResourceData{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - Data: []byte("this is some data"), - }, - expected: &extensions.ThirdPartyResourceData{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - Data: []byte("this is some data"), - }, - }, - } - - for _, test := range tests { - obj, err := MaybeConvertObject(test.input, test.gv, testapi.Default.Converter()) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if !apiequality.Semantic.DeepEqual(test.expected, obj) { - t.Errorf("expected:\n%#v\nsaw:\n%#v\n", test.expected, obj) - } - } -} diff --git a/pkg/kubectl/cmd/util/openapi/BUILD b/pkg/kubectl/cmd/util/openapi/BUILD index 20d3e25e248..fb7ff7fe20a 100644 --- a/pkg/kubectl/cmd/util/openapi/BUILD +++ b/pkg/kubectl/cmd/util/openapi/BUILD @@ -22,6 +22,8 @@ go_library( "//pkg/version:go_default_library", "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", + "//vendor/gopkg.in/yaml.v2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/client-go/discovery:go_default_library", @@ -41,12 +43,14 @@ go_test( tags = ["automanaged"], deps = [ 
"//pkg/kubectl/cmd/util/openapi:go_default_library", - "//vendor/github.com/go-openapi/loads:go_default_library", "//vendor/github.com/go-openapi/spec:go_default_library", + "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", + "//vendor/github.com/googleapis/gnostic/compiler:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/ginkgo/config:go_default_library", "//vendor/github.com/onsi/ginkgo/types:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", + "//vendor/gopkg.in/yaml.v2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/util/openapi/openapi.go b/pkg/kubectl/cmd/util/openapi/openapi.go index de07e06bd87..0da0cf60340 100644 --- a/pkg/kubectl/cmd/util/openapi/openapi.go +++ b/pkg/kubectl/cmd/util/openapi/openapi.go @@ -20,8 +20,10 @@ import ( "fmt" "strings" - "github.com/go-openapi/spec" + "gopkg.in/yaml.v2" + "github.com/golang/glog" + "github.com/googleapis/gnostic/OpenAPIv2" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" @@ -98,7 +100,7 @@ type Kind struct { PrimitiveType string // Extensions are openapi extensions for the object definition. - Extensions spec.Extensions + Extensions map[string]interface{} // Fields are the fields defined for this Kind Fields map[string]Type @@ -130,21 +132,45 @@ type Type struct { // Extensions are extensions for this field and may contain // metadata from the types.go struct field tags. // e.g. 
contains patchStrategy, patchMergeKey, etc - Extensions spec.Extensions + Extensions map[string]interface{} +} + +func vendorExtensionToMap(e []*openapi_v2.NamedAny) map[string]interface{} { + var values map[string]interface{} + + for _, na := range e { + if na.GetName() == "" || na.GetValue() == nil { + continue + } + if na.GetValue().GetYaml() == "" { + continue + } + var value interface{} + err := yaml.Unmarshal([]byte(na.GetValue().GetYaml()), &value) + if err != nil { + continue + } + if values == nil { + values = make(map[string]interface{}) + } + values[na.GetName()] = value + } + + return values } // NewOpenAPIData parses the resource definitions in openapi data by groupversionkind and name -func NewOpenAPIData(s *spec.Swagger) (*Resources, error) { +func NewOpenAPIData(doc *openapi_v2.Document) (*Resources, error) { o := &Resources{ GroupVersionKindToName: map[schema.GroupVersionKind]string{}, NameToDefinition: map[string]Kind{}, } // Parse and index definitions by name - for name, d := range s.Definitions { - definition := o.parseDefinition(name, d) - o.NameToDefinition[name] = definition + for _, ns := range doc.GetDefinitions().GetAdditionalProperties() { + definition := o.parseDefinition(ns.GetName(), ns.GetValue()) + o.NameToDefinition[ns.GetName()] = definition if len(definition.GroupVersionKind.Kind) > 0 { - o.GroupVersionKindToName[definition.GroupVersionKind] = name + o.GroupVersionKindToName[definition.GroupVersionKind] = ns.GetName() } } @@ -185,12 +211,12 @@ func (o *Resources) getTypeNames(elem Type) []string { return t } -func (o *Resources) parseDefinition(name string, s spec.Schema) Kind { +func (o *Resources) parseDefinition(name string, s *openapi_v2.Schema) Kind { gvk, err := o.getGroupVersionKind(s) value := Kind{ Name: name, GroupVersionKind: gvk, - Extensions: s.Extensions, + Extensions: vendorExtensionToMap(s.GetVendorExtension()), Fields: map[string]Type{}, } if err != nil { @@ -202,13 +228,13 @@ func (o *Resources) 
parseDefinition(name string, s spec.Schema) Kind { if o.isPrimitive(s) { value.PrimitiveType = o.getTypeNameForField(s) } - for fieldname, property := range s.Properties { - value.Fields[fieldname] = o.parseField(property) + for _, ns := range s.GetProperties().GetAdditionalProperties() { + value.Fields[ns.GetName()] = o.parseField(ns.GetValue()) } return value } -func (o *Resources) parseField(s spec.Schema) Type { +func (o *Resources) parseField(s *openapi_v2.Schema) Type { def := Type{ TypeName: o.getTypeNameForField(s), IsPrimitive: o.isPrimitive(s), @@ -225,14 +251,14 @@ func (o *Resources) parseField(s spec.Schema) Type { def.ElementType = &d } - def.Extensions = s.Extensions + def.Extensions = vendorExtensionToMap(s.GetVendorExtension()) return def } // isArray returns true if s is an array type. -func (o *Resources) isArray(s spec.Schema) bool { - if len(s.Properties) > 0 { +func (o *Resources) isArray(s *openapi_v2.Schema) bool { + if len(s.GetProperties().GetAdditionalProperties()) > 0 { // Open API can have embedded type definitions, but Kubernetes doesn't generate these. // This should just be a sanity check against changing the format. return false @@ -241,8 +267,8 @@ func (o *Resources) isArray(s spec.Schema) bool { } // isMap returns true if s is a map type. -func (o *Resources) isMap(s spec.Schema) bool { - if len(s.Properties) > 0 { +func (o *Resources) isMap(s *openapi_v2.Schema) bool { + if len(s.GetProperties().GetAdditionalProperties()) > 0 { // Open API can have embedded type definitions, but Kubernetes doesn't generate these. // This should just be a sanity check against changing the format. return false @@ -253,8 +279,8 @@ func (o *Resources) isMap(s spec.Schema) bool { // isPrimitive returns true if s is a primitive type // Note: For object references that represent primitive types - e.g. IntOrString - this will // be false, and the referenced Kind will have a non-empty "PrimitiveType". 
-func (o *Resources) isPrimitive(s spec.Schema) bool { - if len(s.Properties) > 0 { +func (o *Resources) isPrimitive(s *openapi_v2.Schema) bool { + if len(s.GetProperties().GetAdditionalProperties()) > 0 { // Open API can have embedded type definitions, but Kubernetes doesn't generate these. // This should just be a sanity check against changing the format. return false @@ -266,96 +292,96 @@ func (o *Resources) isPrimitive(s spec.Schema) bool { return false } -func (*Resources) getType(s spec.Schema) string { - if len(s.Type) != 1 { +func (*Resources) getType(s *openapi_v2.Schema) string { + if len(s.GetType().GetValue()) != 1 { return "" } - return strings.ToLower(s.Type[0]) + return strings.ToLower(s.GetType().GetValue()[0]) } -func (o *Resources) getTypeNameForField(s spec.Schema) string { +func (o *Resources) getTypeNameForField(s *openapi_v2.Schema) string { // Get the reference for complex types if o.isDefinitionReference(s) { return o.nameForDefinitionField(s) } // Recurse if type is array if o.isArray(s) { - return fmt.Sprintf("%s array", o.getTypeNameForField(*s.Items.Schema)) + return fmt.Sprintf("%s array", o.getTypeNameForField(s.GetItems().GetSchema()[0])) } if o.isMap(s) { - return fmt.Sprintf("%s map", o.getTypeNameForField(*s.AdditionalProperties.Schema)) + return fmt.Sprintf("%s map", o.getTypeNameForField(s.GetAdditionalProperties().GetSchema())) } // Get the value for primitive types if o.isPrimitive(s) { - return fmt.Sprintf("%s", s.Type[0]) + return fmt.Sprintf("%s", s.GetType().GetValue()[0]) } return "" } // isDefinitionReference returns true s is a complex type that should have a Kind. -func (o *Resources) isDefinitionReference(s spec.Schema) bool { - if len(s.Properties) > 0 { +func (o *Resources) isDefinitionReference(s *openapi_v2.Schema) bool { + if len(s.GetProperties().GetAdditionalProperties()) > 0 { // Open API can have embedded type definitions, but Kubernetes doesn't generate these. 
// This should just be a sanity check against changing the format. return false } - if len(s.Type) > 0 { + if len(s.GetType().GetValue()) > 0 { // Definition references won't have a type return false } - p := s.SchemaProps.Ref.GetPointer().String() - return len(p) > 0 && strings.HasPrefix(p, "/definitions/") + p := s.GetXRef() + return len(p) > 0 && strings.HasPrefix(p, "#/definitions/") } // getElementType returns the type of an element for arrays // returns an error if s is not an array. -func (o *Resources) getElementType(s spec.Schema) (spec.Schema, error) { +func (o *Resources) getElementType(s *openapi_v2.Schema) (*openapi_v2.Schema, error) { if !o.isArray(s) { - return spec.Schema{}, fmt.Errorf("%v is not an array type", s.Type) + return &openapi_v2.Schema{}, fmt.Errorf("%v is not an array type", o.getTypeNameForField(s)) } - return *s.Items.Schema, nil + return s.GetItems().GetSchema()[0], nil } -// getElementType returns the type of an element for maps +// getValueType returns the type of an element for maps // returns an error if s is not a map. 
-func (o *Resources) getValueType(s spec.Schema) (spec.Schema, error) { +func (o *Resources) getValueType(s *openapi_v2.Schema) (*openapi_v2.Schema, error) { if !o.isMap(s) { - return spec.Schema{}, fmt.Errorf("%v is not an map type", s.Type) + return &openapi_v2.Schema{}, fmt.Errorf("%v is not an map type", o.getTypeNameForField(s)) } - return *s.AdditionalProperties.Schema, nil + return s.GetAdditionalProperties().GetSchema(), nil } // nameForDefinitionField returns the definition name for the schema (field) if it is a complex type -func (o *Resources) nameForDefinitionField(s spec.Schema) string { - p := s.SchemaProps.Ref.GetPointer().String() +func (o *Resources) nameForDefinitionField(s *openapi_v2.Schema) string { + p := s.GetXRef() if len(p) == 0 { return "" } // Strip the "definitions/" pieces of the reference - return strings.Replace(p, "/definitions/", "", -1) + return strings.Replace(p, "#/definitions/", "", -1) } // getGroupVersionKind implements OpenAPIData // getGVK parses the gropuversionkind for a resource definition from the x-kubernetes // extensions -// Expected format for s.Extensions: map[string][]map[string]string // map[x-kubernetes-group-version-kind:[map[Group:authentication.k8s.io Version:v1 Kind:TokenReview]]] -func (o *Resources) getGroupVersionKind(s spec.Schema) (schema.GroupVersionKind, error) { +func (o *Resources) getGroupVersionKind(s *openapi_v2.Schema) (schema.GroupVersionKind, error) { empty := schema.GroupVersionKind{} + extensionMap := vendorExtensionToMap(s.GetVendorExtension()) // Get the extensions - extList, f := s.Extensions[groupVersionKindExtensionKey] + extList, f := extensionMap[groupVersionKindExtensionKey] if !f { - return empty, fmt.Errorf("No %s extension present in %v", groupVersionKindExtensionKey, s.Extensions) + return empty, fmt.Errorf("No %s extension present in %v", groupVersionKindExtensionKey, extensionMap) } // Expect a empty of a list with 1 element extListCasted, ok := extList.([]interface{}) if !ok { 
- return empty, fmt.Errorf("%s extension has unexpected type %T in %s", groupVersionKindExtensionKey, extListCasted, s.Extensions) + return empty, fmt.Errorf("%s extension has unexpected type %T in %s", groupVersionKindExtensionKey, extListCasted, extensionMap) } if len(extListCasted) == 0 { return empty, fmt.Errorf("No Group Version Kind found in %v", extListCasted) @@ -366,9 +392,9 @@ func (o *Resources) getGroupVersionKind(s spec.Schema) (schema.GroupVersionKind, gvk := extListCasted[0] // Expect a empty of a map with 3 entries - gvkMap, ok := gvk.(map[string]interface{}) + gvkMap, ok := gvk.(map[interface{}]interface{}) if !ok { - return empty, fmt.Errorf("%s extension has unexpected type %T in %s", groupVersionKindExtensionKey, gvk, s.Extensions) + return empty, fmt.Errorf("%s extension has unexpected type %T in %s", groupVersionKindExtensionKey, gvk, extList) } group, ok := gvkMap["group"].(string) if !ok { diff --git a/pkg/kubectl/cmd/util/openapi/openapi_cache.go b/pkg/kubectl/cmd/util/openapi/openapi_cache.go index 25f55171c03..98d7341c43d 100644 --- a/pkg/kubectl/cmd/util/openapi/openapi_cache.go +++ b/pkg/kubectl/cmd/util/openapi/openapi_cache.go @@ -187,7 +187,6 @@ func linkFiles(old, new string) error { // registerBinaryEncodingTypes registers the types so they can be binary encoded by gob func registerBinaryEncodingTypes() { - gob.Register(map[string]interface{}{}) + gob.Register(map[interface{}]interface{}{}) gob.Register([]interface{}{}) - gob.Register(Resources{}) } diff --git a/pkg/kubectl/cmd/util/openapi/openapi_cache_test.go b/pkg/kubectl/cmd/util/openapi/openapi_cache_test.go index 1f61d221789..19c47bc0a45 100644 --- a/pkg/kubectl/cmd/util/openapi/openapi_cache_test.go +++ b/pkg/kubectl/cmd/util/openapi/openapi_cache_test.go @@ -23,8 +23,10 @@ import ( "path/filepath" "sync" - "github.com/go-openapi/loads" - "github.com/go-openapi/spec" + "gopkg.in/yaml.v2" + + "github.com/googleapis/gnostic/OpenAPIv2" + 
"github.com/googleapis/gnostic/compiler" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -220,7 +222,7 @@ type fakeOpenAPIClient struct { err error } -func (f *fakeOpenAPIClient) OpenAPISchema() (*spec.Swagger, error) { +func (f *fakeOpenAPIClient) OpenAPISchema() (*openapi_v2.Document, error) { f.calls = f.calls + 1 if f.err != nil { @@ -235,11 +237,11 @@ var data apiData type apiData struct { sync.Once - data *spec.Swagger + data *openapi_v2.Document err error } -func (d *apiData) OpenAPISchema() (*spec.Swagger, error) { +func (d *apiData) OpenAPISchema() (*openapi_v2.Document, error) { d.Do(func() { // Get the path to the swagger.json file wd, err := os.Getwd() @@ -261,14 +263,18 @@ func (d *apiData) OpenAPISchema() (*spec.Swagger, error) { d.err = err return } - // Load the openapi document - doc, err := loads.Spec(specpath) + spec, err := ioutil.ReadFile(specpath) if err != nil { d.err = err return } - - d.data = doc.Spec() + var info yaml.MapSlice + err = yaml.Unmarshal(spec, &info) + if err != nil { + d.err = err + return + } + d.data, d.err = openapi_v2.NewDocument(info, compiler.NewContext("$root", nil)) }) return d.data, d.err } diff --git a/pkg/kubectl/cmd/util/printing.go b/pkg/kubectl/cmd/util/printing.go index e71a5fdc7c5..b1e9b3527d5 100644 --- a/pkg/kubectl/cmd/util/printing.go +++ b/pkg/kubectl/cmd/util/printing.go @@ -100,7 +100,7 @@ func PrintSuccess(mapper meta.RESTMapper, shortOutput bool, out io.Writer, resou func ValidateOutputArgs(cmd *cobra.Command) error { outputMode := GetFlagString(cmd, "output") if outputMode != "" && outputMode != "name" { - return UsageError(cmd, "Unexpected -o output mode: %v. We only support '-o name'.", outputMode) + return UsageErrorf(cmd, "Unexpected -o output mode: %v. 
We only support '-o name'.", outputMode) } return nil } diff --git a/pkg/kubectl/cmd/version.go b/pkg/kubectl/cmd/version.go index 8e0b7b4ba3c..799db251e97 100644 --- a/pkg/kubectl/cmd/version.go +++ b/pkg/kubectl/cmd/version.go @@ -37,6 +37,14 @@ type Version struct { ServerVersion *apimachineryversion.Info `json:"serverVersion,omitempty" yaml:"serverVersion,omitempty"` } +// VersionOptions: describe the options available to users of the "kubectl +// version" command. +type VersionOptions struct { + clientOnly bool + short bool + output string +} + var ( versionExample = templates.Examples(i18n.T(` # Print the client and server versions for the current context @@ -50,66 +58,19 @@ func NewCmdVersion(f cmdutil.Factory, out io.Writer) *cobra.Command { Long: "Print the client and server version information for the current context", Example: versionExample, Run: func(cmd *cobra.Command, args []string) { - err := RunVersion(f, out, cmd) - cmdutil.CheckErr(err) + options := new(VersionOptions) + cmdutil.CheckErr(options.Complete(cmd)) + cmdutil.CheckErr(options.Validate()) + cmdutil.CheckErr(options.Run(f, out)) }, } cmd.Flags().BoolP("client", "c", false, "Client version only (no server required).") cmd.Flags().BoolP("short", "", false, "Print just the version number.") - cmd.Flags().String("output", "", "output format, options available are yaml and json") + cmd.Flags().String("output", "", "one of 'yaml' or 'json'") cmd.Flags().MarkShorthandDeprecated("client", "please use --client instead.") return cmd } -func RunVersion(f cmdutil.Factory, out io.Writer, cmd *cobra.Command) error { - var serverVersion *apimachineryversion.Info = nil - var serverErr error = nil - vo := Version{nil, nil} - - clientVersion := version.Get() - vo.ClientVersion = &clientVersion - - if !cmdutil.GetFlagBool(cmd, "client") { - serverVersion, serverErr = retrieveServerVersion(f) - vo.ServerVersion = serverVersion - } - - switch of := cmdutil.GetFlagString(cmd, "output"); of { - case "": - if 
cmdutil.GetFlagBool(cmd, "short") { - fmt.Fprintf(out, "Client Version: %s\n", clientVersion.GitVersion) - - if serverVersion != nil { - fmt.Fprintf(out, "Server Version: %s\n", serverVersion.GitVersion) - } - } else { - fmt.Fprintf(out, "Client Version: %s\n", fmt.Sprintf("%#v", clientVersion)) - - if serverVersion != nil { - fmt.Fprintf(out, "Server Version: %s\n", fmt.Sprintf("%#v", *serverVersion)) - } - } - case "yaml": - y, err := yaml.Marshal(&vo) - if err != nil { - return err - } - - fmt.Fprintln(out, string(y)) - case "json": - y, err := json.Marshal(&vo) - if err != nil { - return err - } - fmt.Fprintln(out, string(y)) - default: - return errors.New("invalid output format: " + of) - - } - - return serverErr -} - func retrieveServerVersion(f cmdutil.Factory) (*apimachineryversion.Info, error) { discoveryClient, err := f.DiscoveryClient() if err != nil { @@ -118,11 +79,69 @@ func retrieveServerVersion(f cmdutil.Factory) (*apimachineryversion.Info, error) // Always request fresh data from the server discoveryClient.Invalidate() + return discoveryClient.ServerVersion() +} - serverVersion, err := discoveryClient.ServerVersion() - if err != nil { - return nil, err +func (o *VersionOptions) Run(f cmdutil.Factory, out io.Writer) error { + var ( + serverVersion *apimachineryversion.Info + serverErr error + versionInfo Version + ) + + clientVersion := version.Get() + versionInfo.ClientVersion = &clientVersion + + if !o.clientOnly { + serverVersion, serverErr = retrieveServerVersion(f) + versionInfo.ServerVersion = serverVersion } - return serverVersion, nil + switch o.output { + case "": + if o.short { + fmt.Fprintf(out, "Client Version: %s\n", clientVersion.GitVersion) + if serverVersion != nil { + fmt.Fprintf(out, "Server Version: %s\n", serverVersion.GitVersion) + } + } else { + fmt.Fprintf(out, "Client Version: %s\n", fmt.Sprintf("%#v", clientVersion)) + if serverVersion != nil { + fmt.Fprintf(out, "Server Version: %s\n", fmt.Sprintf("%#v", *serverVersion)) + 
} + } + case "yaml": + marshalled, err := yaml.Marshal(&versionInfo) + if err != nil { + return err + } + fmt.Fprintln(out, string(marshalled)) + case "json": + marshalled, err := json.Marshal(&versionInfo) + if err != nil { + return err + } + fmt.Fprintln(out, string(marshalled)) + default: + // There is a bug in the program if we hit this case. + // However, we follow a policy of never panicking. + return fmt.Errorf("VersionOptions were not validated: --output=%q should have been rejected", o.output) + } + + return serverErr +} + +func (o *VersionOptions) Complete(cmd *cobra.Command) error { + o.clientOnly = cmdutil.GetFlagBool(cmd, "client") + o.short = cmdutil.GetFlagBool(cmd, "short") + o.output = cmdutil.GetFlagString(cmd, "output") + return nil +} + +func (o *VersionOptions) Validate() error { + if o.output != "" && o.output != "yaml" && o.output != "json" { + return errors.New(`--output must be 'yaml' or 'json'`) + } + + return nil } diff --git a/pkg/kubectl/history.go b/pkg/kubectl/history.go index aca1963eaf7..8f07a40bee2 100644 --- a/pkg/kubectl/history.go +++ b/pkg/kubectl/history.go @@ -39,8 +39,8 @@ import ( clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/controller" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" + sliceutil "k8s.io/kubernetes/pkg/kubectl/util/slice" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" - sliceutil "k8s.io/kubernetes/pkg/util/slice" ) const ( diff --git a/pkg/kubectl/proxy_server.go b/pkg/kubectl/proxy_server.go index ce60727b4b8..f1712287ee9 100644 --- a/pkg/kubectl/proxy_server.go +++ b/pkg/kubectl/proxy_server.go @@ -29,7 +29,7 @@ import ( "github.com/golang/glog" restclient "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/kubectl/util" ) const ( diff --git a/pkg/kubectl/resource/BUILD b/pkg/kubectl/resource/BUILD index 8f9b8176d35..637a3fb3ac7 100644 --- a/pkg/kubectl/resource/BUILD +++ 
b/pkg/kubectl/resource/BUILD @@ -26,7 +26,6 @@ go_library( deps = [ "//pkg/api:go_default_library", "//pkg/api/validation:go_default_library", - "//pkg/apis/extensions:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/text/encoding/unicode:go_default_library", "//vendor/golang.org/x/text/transform:go_default_library", diff --git a/pkg/kubectl/resource/helper.go b/pkg/kubectl/resource/helper.go index 2cbdd847471..e6452244b67 100644 --- a/pkg/kubectl/resource/helper.go +++ b/pkg/kubectl/resource/helper.go @@ -58,6 +58,7 @@ func (m *Helper) Get(namespace, name string, export bool) (runtime.Object, error Resource(m.Resource). Name(name) if export { + // TODO: I should be part of GetOptions req.Param("export", strconv.FormatBool(export)) } return req.Do().Get() @@ -68,8 +69,11 @@ func (m *Helper) List(namespace, apiVersion string, selector labels.Selector, ex req := m.RESTClient.Get(). NamespaceIfScoped(namespace, m.NamespaceScoped). Resource(m.Resource). - LabelsSelectorParam(selector) + VersionedParams(&metav1.ListOptions{ + LabelSelector: selector.String(), + }, metav1.ParameterCodec) if export { + // TODO: I should be part of ListOptions req.Param("export", strconv.FormatBool(export)) } return req.Do().Get() @@ -79,9 +83,11 @@ func (m *Helper) Watch(namespace, resourceVersion, apiVersion string, labelSelec return m.RESTClient.Get(). NamespaceIfScoped(namespace, m.NamespaceScoped). Resource(m.Resource). - Param("resourceVersion", resourceVersion). - Param("watch", "true"). - LabelsSelectorParam(labelSelector). + VersionedParams(&metav1.ListOptions{ + ResourceVersion: resourceVersion, + Watch: true, + LabelSelector: labelSelector.String(), + }, metav1.ParameterCodec). Watch() } @@ -89,9 +95,11 @@ func (m *Helper) WatchSingle(namespace, name, resourceVersion string) (watch.Int return m.RESTClient.Get(). NamespaceIfScoped(namespace, m.NamespaceScoped). Resource(m.Resource). - Param("resourceVersion", resourceVersion). 
- Param("watch", "true"). - FieldsSelectorParam(fields.OneTermEqualSelector("metadata.name", name)). + VersionedParams(&metav1.ListOptions{ + ResourceVersion: resourceVersion, + Watch: true, + FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(), + }, metav1.ParameterCodec). Watch() } diff --git a/pkg/kubectl/resource/result.go b/pkg/kubectl/resource/result.go index c77502bfec0..bb185754c5c 100644 --- a/pkg/kubectl/resource/result.go +++ b/pkg/kubectl/resource/result.go @@ -30,7 +30,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/watch" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" ) // ErrMatchFunc can be used to filter errors that may not be true failures. @@ -266,13 +265,6 @@ func AsVersionedObjects(infos []*Info, version schema.GroupVersion, encoder runt continue } - // TODO: use info.VersionedObject as the value? - switch obj := info.Object.(type) { - case *extensions.ThirdPartyResourceData: - objects = append(objects, &runtime.Unknown{Raw: obj.Data}) - continue - } - // objects that are not part of api.Scheme must be converted to JSON // TODO: convert to map[string]interface{}, attach to runtime.Unknown? if !version.Empty() { diff --git a/pkg/kubectl/resource_filter.go b/pkg/kubectl/resource_filter.go index 0567d9a6e8b..a85d1454b9a 100644 --- a/pkg/kubectl/resource_filter.go +++ b/pkg/kubectl/resource_filter.go @@ -38,21 +38,18 @@ func NewResourceFilter() Filters { } // filterPods returns true if a pod should be skipped. -// defaults to true for terminated pods +// If show-all is true, the pod will be never be skipped (return false); +// otherwise, skip terminated pod. 
func filterPods(obj runtime.Object, options printers.PrintOptions) bool { + if options.ShowAll { + return false + } + switch p := obj.(type) { case *v1.Pod: - reason := string(p.Status.Phase) - if p.Status.Reason != "" { - reason = p.Status.Reason - } - return !options.ShowAll && (reason == string(v1.PodSucceeded) || reason == string(v1.PodFailed)) + return p.Status.Phase == v1.PodSucceeded || p.Status.Phase == v1.PodFailed case *api.Pod: - reason := string(p.Status.Phase) - if p.Status.Reason != "" { - reason = p.Status.Reason - } - return !options.ShowAll && (reason == string(api.PodSucceeded) || reason == string(api.PodFailed)) + return p.Status.Phase == api.PodSucceeded || p.Status.Phase == api.PodFailed } return false } diff --git a/pkg/kubectl/resource_filter_test.go b/pkg/kubectl/resource_filter_test.go new file mode 100644 index 00000000000..a4593363357 --- /dev/null +++ b/pkg/kubectl/resource_filter_test.go @@ -0,0 +1,76 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + "testing" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/printers" +) + +func TestResourceFilter(t *testing.T) { + tests := []struct { + name string + hide bool + object runtime.Object + }{ + {"v1.Pod pending", false, &v1.Pod{Status: v1.PodStatus{Phase: v1.PodPending}}}, + {"v1.Pod running", false, &v1.Pod{Status: v1.PodStatus{Phase: v1.PodRunning}}}, + {"v1.Pod succeeded", true, &v1.Pod{Status: v1.PodStatus{Phase: v1.PodSucceeded}}}, + {"v1.Pod failed", true, &v1.Pod{Status: v1.PodStatus{Phase: v1.PodFailed}}}, + {"v1.Pod evicted", true, &v1.Pod{Status: v1.PodStatus{Phase: v1.PodFailed, Reason: "Evicted"}}}, + {"v1.Pod unknown", false, &v1.Pod{Status: v1.PodStatus{Phase: v1.PodUnknown}}}, + + {"api.Pod pending", false, &api.Pod{Status: api.PodStatus{Phase: api.PodPending}}}, + {"api.Pod running", false, &api.Pod{Status: api.PodStatus{Phase: api.PodRunning}}}, + {"api.Pod succeeded", true, &api.Pod{Status: api.PodStatus{Phase: api.PodSucceeded}}}, + {"api.Pod failed", true, &api.Pod{Status: api.PodStatus{Phase: api.PodFailed}}}, + {"api.Pod evicted", true, &api.Pod{Status: api.PodStatus{Phase: api.PodFailed, Reason: "Evicted"}}}, + {"api.Pod unknown", false, &api.Pod{Status: api.PodStatus{Phase: api.PodUnknown}}}, + } + + filters := NewResourceFilter() + + options := &printers.PrintOptions{ + ShowAll: false, + } + for _, test := range tests { + got, err := filters.Filter(test.object, options) + if err != nil { + t.Errorf("%v: unexpected error: %v", test.name, err) + continue + } + if want := test.hide; got != want { + t.Errorf("%v: got %v, want %v", test.name, got, want) + } + } + + options.ShowAll = true + for _, test := range tests { + got, err := filters.Filter(test.object, options) + if err != nil { + t.Errorf("%v: unexpected error: %v", test.name, err) + continue + } + if want := false; got != want { + t.Errorf("%v (ShowAll): got %v, want %v", test.name, 
got, want) + } + } +} diff --git a/pkg/kubectl/rollback.go b/pkg/kubectl/rollback.go index ded384b2b52..ee9594db648 100644 --- a/pkg/kubectl/rollback.go +++ b/pkg/kubectl/rollback.go @@ -39,8 +39,8 @@ import ( clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/controller/daemon" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" + sliceutil "k8s.io/kubernetes/pkg/kubectl/util/slice" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" - sliceutil "k8s.io/kubernetes/pkg/util/slice" ) const ( diff --git a/pkg/kubectl/run.go b/pkg/kubectl/run.go index 2ec50978d5d..e1f4549d62c 100644 --- a/pkg/kubectl/run.go +++ b/pkg/kubectl/run.go @@ -52,6 +52,7 @@ func (DeploymentV1Beta1) ParamNames() []GeneratorParam { {"env", false}, {"requests", false}, {"limits", false}, + {"serviceaccount", false}, } } @@ -141,6 +142,7 @@ func (DeploymentAppsV1Beta1) ParamNames() []GeneratorParam { {"env", false}, {"requests", false}, {"limits", false}, + {"serviceaccount", false}, } } @@ -306,6 +308,7 @@ func (JobV1) ParamNames() []GeneratorParam { {"requests", false}, {"limits", false}, {"restart", false}, + {"serviceaccount", false}, } } @@ -400,6 +403,7 @@ func (CronJobV2Alpha1) ParamNames() []GeneratorParam { {"limits", false}, {"restart", false}, {"schedule", true}, + {"serviceaccount", false}, } } @@ -498,6 +502,7 @@ func (BasicReplicationController) ParamNames() []GeneratorParam { {"env", false}, {"requests", false}, {"limits", false}, + {"serviceaccount", false}, } } @@ -603,6 +608,7 @@ func makePodSpec(params map[string]string, name string) (*v1.PodSpec, error) { } spec := v1.PodSpec{ + ServiceAccountName: params["serviceaccount"], Containers: []v1.Container{ { Name: name, @@ -761,6 +767,7 @@ func (BasicPod) ParamNames() []GeneratorParam { {"env", false}, {"requests", false}, {"limits", false}, + {"serviceaccount", false}, } } @@ -821,6 +828,7 @@ func (BasicPod) Generate(genericParams 
map[string]interface{}) (runtime.Object, Labels: labels, }, Spec: v1.PodSpec{ + ServiceAccountName: params["serviceaccount"], Containers: []v1.Container{ { Name: name, diff --git a/pkg/kubectl/secret.go b/pkg/kubectl/secret.go index 6f5f01b833d..ecb2f34e95a 100644 --- a/pkg/kubectl/secret.go +++ b/pkg/kubectl/secret.go @@ -217,7 +217,7 @@ func handleFromEnvFileSource(secret *api.Secret, envFileSource string) error { } } if info.IsDir() { - return fmt.Errorf("must be a file") + return fmt.Errorf("env secret file cannot be a directory") } return addFromEnvFile(envFileSource, func(key, value string) error { diff --git a/pkg/kubectl/service_basic.go b/pkg/kubectl/service_basic.go index a28a76f3cc8..d53cc60fcd2 100644 --- a/pkg/kubectl/service_basic.go +++ b/pkg/kubectl/service_basic.go @@ -179,6 +179,7 @@ func (s ServiceExternalNameGeneratorV1) Generate(params map[string]interface{}) } // validate validates required fields are set to support structured generation +// TODO(xiangpengzhao): validate ports are identity mapped for headless service when we enforce that in validation.validateServicePort. 
func (s ServiceCommonGeneratorV1) validate() error { if len(s.Name) == 0 { return fmt.Errorf("name must be specified") @@ -189,9 +190,6 @@ func (s ServiceCommonGeneratorV1) validate() error { if s.ClusterIP == api.ClusterIPNone && s.Type != api.ServiceTypeClusterIP { return fmt.Errorf("ClusterIP=None can only be used with ClusterIP service type") } - if s.ClusterIP == api.ClusterIPNone && len(s.TCP) > 0 { - return fmt.Errorf("can not map ports with clusterip=None") - } if s.ClusterIP != api.ClusterIPNone && len(s.TCP) == 0 && s.Type != api.ServiceTypeExternalName { return fmt.Errorf("at least one tcp port specifier must be provided") } diff --git a/pkg/kubectl/service_basic_test.go b/pkg/kubectl/service_basic_test.go index b0363e7dfca..a7fea486085 100644 --- a/pkg/kubectl/service_basic_test.go +++ b/pkg/kubectl/service_basic_test.go @@ -57,13 +57,6 @@ func TestServiceBasicGenerate(t *testing.T) { serviceType: api.ServiceTypeClusterIP, expectErr: true, }, - { - name: "clusterip-none and port mapping", - tcp: []string{"456:9898"}, - clusterip: "None", - serviceType: api.ServiceTypeClusterIP, - expectErr: true, - }, { name: "clusterip-none-wrong-type", tcp: []string{}, @@ -88,6 +81,23 @@ func TestServiceBasicGenerate(t *testing.T) { }, expectErr: false, }, + { + name: "clusterip-none-and-port-mapping", + tcp: []string{"456:9898"}, + clusterip: "None", + serviceType: api.ServiceTypeClusterIP, + expected: &api.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "clusterip-none-and-port-mapping", + Labels: map[string]string{"app": "clusterip-none-and-port-mapping"}, + }, + Spec: api.ServiceSpec{Type: "ClusterIP", + Ports: []api.ServicePort{{Name: "456-9898", Protocol: "TCP", Port: 456, TargetPort: intstr.IntOrString{Type: 0, IntVal: 9898, StrVal: ""}, NodePort: 0}}, + Selector: map[string]string{"app": "clusterip-none-and-port-mapping"}, + ClusterIP: "None", ExternalIPs: []string(nil), LoadBalancerIP: ""}, + }, + expectErr: false, + }, { name: "loadbalancer-ok", tcp: 
[]string{"456:9898"}, diff --git a/pkg/kubectl/service_test.go b/pkg/kubectl/service_test.go index 6266779c52d..bd80422ea31 100644 --- a/pkg/kubectl/service_test.go +++ b/pkg/kubectl/service_test.go @@ -560,6 +560,26 @@ func TestGenerateService(t *testing.T) { }, }, }, + { + generator: ServiceGeneratorV2{}, + params: map[string]interface{}{ + "selector": "foo=bar", + "name": "test", + "cluster-ip": "None", + }, + expected: api.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "foo": "bar", + }, + Ports: []api.ServicePort{}, + ClusterIP: api.ClusterIPNone, + }, + }, + }, } for _, test := range tests { obj, err := test.generator.Generate(test.params) diff --git a/pkg/kubectl/stop.go b/pkg/kubectl/stop.go index 86510ac02af..fe28b0417db 100644 --- a/pkg/kubectl/stop.go +++ b/pkg/kubectl/stop.go @@ -38,7 +38,6 @@ import ( coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" - "k8s.io/kubernetes/pkg/util" ) const ( @@ -82,9 +81,6 @@ func ReaperFor(kind schema.GroupKind, c internalclientset.Interface) (Reaper, er case api.Kind("Pod"): return &PodReaper{c.Core()}, nil - case api.Kind("Service"): - return &ServiceReaper{c.Core()}, nil - case batch.Kind("Job"): return &JobReaper{c.Batch(), c.Core(), Interval, Timeout}, nil @@ -127,9 +123,6 @@ type DeploymentReaper struct { type PodReaper struct { client coreclient.PodsGetter } -type ServiceReaper struct { - client coreclient.ServicesGetter -} type StatefulSetReaper struct { client appsclient.StatefulSetsGetter podClient coreclient.PodsGetter @@ -401,7 +394,8 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d 
*extensions.Deployment) { // set deployment's history and scale to 0 // TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527 - d.Spec.RevisionHistoryLimit = util.Int32Ptr(0) + rhl := int32(0) + d.Spec.RevisionHistoryLimit = &rhl d.Spec.Replicas = 0 d.Spec.Paused = true }) @@ -484,14 +478,3 @@ func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gra } return pods.Delete(name, gracePeriod) } - -func (reaper *ServiceReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error { - services := reaper.client.Services(namespace) - _, err := services.Get(name, metav1.GetOptions{}) - if err != nil { - return err - } - falseVar := false - deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar} - return services.Delete(name, deleteOptions) -} diff --git a/pkg/kubectl/stop_test.go b/pkg/kubectl/stop_test.go index dd8752903e7..0d2f5ea9daa 100644 --- a/pkg/kubectl/stop_test.go +++ b/pkg/kubectl/stop_test.go @@ -558,26 +558,26 @@ func (c *noSuchPod) Get(name string, options metav1.GetOptions) (*api.Pod, error return nil, fmt.Errorf("%s does not exist", name) } -type noDeleteService struct { - coreclient.ServiceInterface +type noDeletePod struct { + coreclient.PodInterface } -func (c *noDeleteService) Delete(service string, o *metav1.DeleteOptions) error { +func (c *noDeletePod) Delete(name string, o *metav1.DeleteOptions) error { return fmt.Errorf("I'm afraid I can't do that, Dave") } type reaperFake struct { *fake.Clientset - noSuchPod, noDeleteService bool + noSuchPod, noDeletePod bool } func (c *reaperFake) Core() coreclient.CoreInterface { - return &reaperCoreFake{c.Clientset.Core(), c.noSuchPod, c.noDeleteService} + return &reaperCoreFake{c.Clientset.Core(), c.noSuchPod, c.noDeletePod} } type reaperCoreFake struct { coreclient.CoreInterface - noSuchPod, noDeleteService bool + noSuchPod, noDeletePod bool } func (c *reaperCoreFake) Pods(namespace string) 
coreclient.PodInterface { @@ -585,25 +585,16 @@ func (c *reaperCoreFake) Pods(namespace string) coreclient.PodInterface { if c.noSuchPod { return &noSuchPod{pods} } - return pods -} - -func (c *reaperCoreFake) Services(namespace string) coreclient.ServiceInterface { - services := c.CoreInterface.Services(namespace) - if c.noDeleteService { - return &noDeleteService{services} + if c.noDeletePod { + return &noDeletePod{pods} } - return services + return pods } func pod() *api.Pod { return &api.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "foo"}} } -func service() *api.Service { - return &api.Service{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "foo"}} -} - func TestSimpleStop(t *testing.T) { tests := []struct { fake *reaperFake @@ -624,18 +615,6 @@ func TestSimpleStop(t *testing.T) { expectError: false, test: "stop pod succeeds", }, - { - fake: &reaperFake{ - Clientset: fake.NewSimpleClientset(service()), - }, - kind: api.Kind("Service"), - actions: []testcore.Action{ - testcore.NewGetAction(api.Resource("services").WithVersion(""), metav1.NamespaceDefault, "foo"), - testcore.NewDeleteAction(api.Resource("services").WithVersion(""), metav1.NamespaceDefault, "foo"), - }, - expectError: false, - test: "stop service succeeds", - }, { fake: &reaperFake{ Clientset: fake.NewSimpleClientset(), @@ -648,15 +627,15 @@ func TestSimpleStop(t *testing.T) { }, { fake: &reaperFake{ - Clientset: fake.NewSimpleClientset(service()), - noDeleteService: true, + Clientset: fake.NewSimpleClientset(pod()), + noDeletePod: true, }, - kind: api.Kind("Service"), + kind: api.Kind("Pod"), actions: []testcore.Action{ - testcore.NewGetAction(api.Resource("services").WithVersion(""), metav1.NamespaceDefault, "foo"), + testcore.NewGetAction(api.Resource("pods").WithVersion(""), metav1.NamespaceDefault, "foo"), }, expectError: true, - test: "stop service fails, can't delete", + test: "stop pod fails, can't delete", }, } for _, test := range 
tests { diff --git a/pkg/kubectl/testing/BUILD b/pkg/kubectl/testing/BUILD index 8a9ac3d13f8..2b5317576e5 100644 --- a/pkg/kubectl/testing/BUILD +++ b/pkg/kubectl/testing/BUILD @@ -18,7 +18,6 @@ go_library( deps = [ "//vendor/github.com/ugorji/go/codec:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", ], ) diff --git a/pkg/kubectl/testing/types.go b/pkg/kubectl/testing/types.go index ab9a54f45d3..2b8ca1df5d1 100644 --- a/pkg/kubectl/testing/types.go +++ b/pkg/kubectl/testing/types.go @@ -18,7 +18,6 @@ package testing import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" ) type TestStruct struct { @@ -30,5 +29,3 @@ type TestStruct struct { StringList []string `json:"StringList"` IntList []int `json:"IntList"` } - -func (obj *TestStruct) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/pkg/kubectl/util/BUILD b/pkg/kubectl/util/BUILD index f4113db90fe..aa82e750aaf 100644 --- a/pkg/kubectl/util/BUILD +++ b/pkg/kubectl/util/BUILD @@ -7,7 +7,10 @@ load( go_library( name = "go_default_library", - srcs = ["util.go"], + srcs = [ + "umask.go", + "util.go", + ], tags = ["automanaged"], visibility = ["//build/visible_to:pkg_kubectl_util_CONSUMERS"], deps = [ @@ -25,7 +28,12 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//pkg/kubectl/util/crlf:all-srcs", + "//pkg/kubectl/util/slice:all-srcs", + "//pkg/kubectl/util/term:all-srcs", + ], tags = ["automanaged"], visibility = ["//build/visible_to:pkg_kubectl_util_CONSUMERS"], ) diff --git a/pkg/util/crlf/BUILD b/pkg/kubectl/util/crlf/BUILD similarity index 100% rename from pkg/util/crlf/BUILD rename to pkg/kubectl/util/crlf/BUILD diff --git a/pkg/util/crlf/crlf.go b/pkg/kubectl/util/crlf/crlf.go similarity index 100% rename from 
pkg/util/crlf/crlf.go rename to pkg/kubectl/util/crlf/crlf.go diff --git a/pkg/kubectl/util/slice/BUILD b/pkg/kubectl/util/slice/BUILD new file mode 100644 index 00000000000..35cb2024d50 --- /dev/null +++ b/pkg/kubectl/util/slice/BUILD @@ -0,0 +1,35 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = ["slice.go"], + tags = ["automanaged"], +) + +go_test( + name = "go_default_test", + srcs = ["slice_test.go"], + library = ":go_default_library", + tags = ["automanaged"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/pkg/util/errors/doc.go b/pkg/kubectl/util/slice/slice.go similarity index 56% rename from pkg/util/errors/doc.go rename to pkg/kubectl/util/slice/slice.go index 38d261613cd..6885c4888db 100644 --- a/pkg/util/errors/doc.go +++ b/pkg/kubectl/util/slice/slice.go @@ -14,8 +14,19 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package errors only exists until heapster rebases -// TODO genericapiserver remove this empty package. Godep fails without this because heapster relies -// on this package. This will allow us to start splitting packages, but will force -// heapster to update on their next kube rebase. -package errors +package slice + +import ( + "sort" +) + +// Int64Slice attaches the methods of Interface to []int64, +// sorting in increasing order. 
+type Int64Slice []int64 + +func (p Int64Slice) Len() int { return len(p) } +func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// Sorts []int64 in increasing order +func SortInts64(a []int64) { sort.Sort(Int64Slice(a)) } diff --git a/pkg/util/json/doc.go b/pkg/kubectl/util/slice/slice_test.go similarity index 65% rename from pkg/util/json/doc.go rename to pkg/kubectl/util/slice/slice_test.go index a7c3c015172..7e3bec6e277 100644 --- a/pkg/util/json/doc.go +++ b/pkg/kubectl/util/slice/slice_test.go @@ -14,8 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package json only exists until heapster rebases -// TODO genericapiserver remove this empty package. Godep fails without this because heapster relies -// on this package. This will allow us to start splitting packages, but will force -// heapster to update on their next kube rebase. -package json +package slice + +import ( + "reflect" + "testing" +) + +func TestSortInts64(t *testing.T) { + src := []int64{10, 1, 2, 3, 4, 5, 6} + expected := []int64{1, 2, 3, 4, 5, 6, 10} + SortInts64(src) + if !reflect.DeepEqual(src, expected) { + t.Errorf("func Ints64 didnt sort correctly, %v !- %v", src, expected) + } +} diff --git a/pkg/kubectl/util/term/BUILD b/pkg/kubectl/util/term/BUILD new file mode 100644 index 00000000000..7f977b0190b --- /dev/null +++ b/pkg/kubectl/util/term/BUILD @@ -0,0 +1,47 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "resize.go", + "resizeevents.go", + "term.go", + "term_writer.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/util/interrupt:go_default_library", + "//vendor/github.com/docker/docker/pkg/term:go_default_library", + 
"//vendor/github.com/mitchellh/go-wordwrap:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["term_writer_test.go"], + library = ":go_default_library", + tags = ["automanaged"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/pkg/util/term/resize.go b/pkg/kubectl/util/term/resize.go similarity index 100% rename from pkg/util/term/resize.go rename to pkg/kubectl/util/term/resize.go diff --git a/pkg/util/term/resizeevents.go b/pkg/kubectl/util/term/resizeevents.go similarity index 100% rename from pkg/util/term/resizeevents.go rename to pkg/kubectl/util/term/resizeevents.go diff --git a/pkg/util/term/resizeevents_windows.go b/pkg/kubectl/util/term/resizeevents_windows.go similarity index 100% rename from pkg/util/term/resizeevents_windows.go rename to pkg/kubectl/util/term/resizeevents_windows.go diff --git a/pkg/util/term/term.go b/pkg/kubectl/util/term/term.go similarity index 100% rename from pkg/util/term/term.go rename to pkg/kubectl/util/term/term.go diff --git a/pkg/util/term/term_writer.go b/pkg/kubectl/util/term/term_writer.go similarity index 100% rename from pkg/util/term/term_writer.go rename to pkg/kubectl/util/term/term_writer.go diff --git a/pkg/util/term/term_writer_test.go b/pkg/kubectl/util/term/term_writer_test.go similarity index 100% rename from pkg/util/term/term_writer_test.go rename to pkg/kubectl/util/term/term_writer_test.go diff --git a/pkg/util/umask.go b/pkg/kubectl/util/umask.go similarity index 100% rename from pkg/util/umask.go rename to pkg/kubectl/util/umask.go diff --git a/pkg/util/umask_windows.go b/pkg/kubectl/util/umask_windows.go similarity index 100% rename from 
pkg/util/umask_windows.go rename to pkg/kubectl/util/umask_windows.go diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD index 9880249c4e3..09a34307520 100644 --- a/pkg/kubelet/BUILD +++ b/pkg/kubelet/BUILD @@ -46,7 +46,6 @@ go_library( "//pkg/apis/componentconfig/v1alpha1:go_default_library", "//pkg/capabilities:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/client/clientset_generated/clientset/typed/certificates/v1beta1:go_default_library", "//pkg/client/listers/core/v1:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/features:go_default_library", @@ -96,7 +95,6 @@ go_library( "//pkg/security/apparmor:go_default_library", "//pkg/securitycontext:go_default_library", "//pkg/util:go_default_library", - "//pkg/util/bandwidth:go_default_library", "//pkg/util/dbus:go_default_library", "//pkg/util/exec:go_default_library", "//pkg/util/io:go_default_library", @@ -119,7 +117,6 @@ go_library( "//vendor/github.com/google/cadvisor/events:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", - "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -201,7 +198,6 @@ go_test( "//pkg/kubelet/util/queue:go_default_library", "//pkg/kubelet/util/sliceutils:go_default_library", "//pkg/kubelet/volumemanager:go_default_library", - "//pkg/util/bandwidth:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/version:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/kubelet/OWNERS b/pkg/kubelet/OWNERS index f8dc405b5b1..82e510eaaab 100644 --- a/pkg/kubelet/OWNERS +++ b/pkg/kubelet/OWNERS @@ -2,7 +2,7 @@ approvers: - Random-Liu - dchen1107 - derekwaynecarr -- timstclair +- tallclair - vishh - 
yujuhong reviewers: diff --git a/pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go b/pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go index 8377cd33b9c..df37cc52b6a 100644 --- a/pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go +++ b/pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go @@ -655,32 +655,12 @@ type PodSandboxConfig struct { // // In general, in order to preserve a well-defined interface between the // kubelet and the container runtime, annotations SHOULD NOT influence - // runtime behaviour. For legacy reasons, there are some annotations which - // currently explicitly break this rule, listed below; in future versions - // of the interface these will be promoted to typed features. + // runtime behaviour. // // Annotations can also be useful for runtime authors to experiment with // new features that are opaque to the Kubernetes APIs (both user-facing // and the CRI). Whenever possible, however, runtime authors SHOULD // consider proposing new typed fields for any new features instead. - // - // 1. Seccomp - // - // key: security.alpha.kubernetes.io/seccomp/pod - // description: the seccomp profile for the containers of an entire pod. - // value: see below. - // - // key: security.alpha.kubernetes.io/seccomp/container/ - // description: the seccomp profile for the container (overrides pod). - // value: see below - // - // The value of seccomp is runtime agnostic: - // * runtime/default: the default profile for the container runtime - // * unconfined: unconfined profile, ie, no seccomp sandboxing - // * localhost/: the profile installed to the node's - // local seccomp profile root. Note that profile root is set in - // kubelet, and it is not passed in CRI yet, see https://issues.k8s.io/36997. - // Annotations map[string]string `protobuf:"bytes,7,rep,name=annotations" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Optional configurations specific to Linux hosts. 
Linux *LinuxPodSandboxConfig `protobuf:"bytes,8,opt,name=linux" json:"linux,omitempty"` @@ -1350,6 +1330,12 @@ type LinuxContainerSecurityContext struct { // (localhost) by name. The possible profile names are detailed at // http://wiki.apparmor.net/index.php/AppArmor_Core_Policy_Reference ApparmorProfile string `protobuf:"bytes,9,opt,name=apparmor_profile,json=apparmorProfile,proto3" json:"apparmor_profile,omitempty"` + // Seccomp profile for the container, candidate values are: + // * runtime/default: the default profile for the container runtime + // * unconfined: unconfined profile, ie, no seccomp sandboxing + // * localhost/: the profile installed on the node. + // is the full path of the profile. + SeccompProfilePath string `protobuf:"bytes,10,opt,name=seccomp_profile_path,json=seccompProfilePath,proto3" json:"seccomp_profile_path,omitempty"` } func (m *LinuxContainerSecurityContext) Reset() { *m = LinuxContainerSecurityContext{} } @@ -1421,6 +1407,13 @@ func (m *LinuxContainerSecurityContext) GetApparmorProfile() string { return "" } +func (m *LinuxContainerSecurityContext) GetSeccompProfilePath() string { + if m != nil { + return m.SeccompProfilePath + } + return "" +} + // LinuxContainerConfig contains platform-specific configuration for // Linux-based containers. 
type LinuxContainerConfig struct { @@ -5722,6 +5715,12 @@ func (m *LinuxContainerSecurityContext) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintApi(dAtA, i, uint64(len(m.ApparmorProfile))) i += copy(dAtA[i:], m.ApparmorProfile) } + if len(m.SeccompProfilePath) > 0 { + dAtA[i] = 0x52 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.SeccompProfilePath))) + i += copy(dAtA[i:], m.SeccompProfilePath) + } return i, nil } @@ -8676,6 +8675,10 @@ func (m *LinuxContainerSecurityContext) Size() (n int) { if l > 0 { n += 1 + l + sovApi(uint64(l)) } + l = len(m.SeccompProfilePath) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } return n } @@ -10133,6 +10136,7 @@ func (this *LinuxContainerSecurityContext) String() string { `ReadonlyRootfs:` + fmt.Sprintf("%v", this.ReadonlyRootfs) + `,`, `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, `ApparmorProfile:` + fmt.Sprintf("%v", this.ApparmorProfile) + `,`, + `SeccompProfilePath:` + fmt.Sprintf("%v", this.SeccompProfilePath) + `,`, `}`, }, "") return s @@ -15927,6 +15931,35 @@ func (m *LinuxContainerSecurityContext) Unmarshal(dAtA []byte) error { } m.ApparmorProfile = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeccompProfilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SeccompProfilePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(dAtA[iNdEx:]) @@ -23943,259 +23976,260 @@ var ( func init() { proto.RegisterFile("api.proto", 
fileDescriptorApi) } var fileDescriptorApi = []byte{ - // 4051 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3b, 0x4d, 0x73, 0x1b, 0x47, - 0x76, 0x04, 0xc0, 0x0f, 0xe0, 0x81, 0x00, 0xc1, 0x16, 0x45, 0x42, 0x90, 0x44, 0xd3, 0x23, 0xcb, - 0x96, 0xb4, 0x2b, 0x59, 0xa6, 0xd7, 0x76, 0x2c, 0x7f, 0x09, 0x26, 0x29, 0x17, 0x2d, 0x89, 0xe4, - 0x0e, 0x44, 0xef, 0x6e, 0x36, 0x55, 0x93, 0x21, 0xa6, 0x09, 0x8e, 0x0d, 0x4c, 0xcf, 0xce, 0xf4, - 0xc8, 0x62, 0x4e, 0xc9, 0x2d, 0x47, 0xe7, 0x98, 0x5b, 0x0e, 0xa9, 0xda, 0xca, 0x25, 0x87, 0x9c, - 0xf2, 0x0b, 0x52, 0x5b, 0x95, 0x4a, 0x55, 0x0e, 0xa9, 0x54, 0x72, 0xdb, 0x55, 0x0e, 0x39, 0xa4, - 0x2a, 0xbf, 0x61, 0xab, 0xbf, 0x66, 0x7a, 0xbe, 0x20, 0x52, 0x76, 0xed, 0xea, 0x36, 0xfd, 0xfa, - 0xbd, 0xd7, 0xaf, 0xfb, 0xbd, 0x7e, 0xfd, 0xde, 0xeb, 0x1e, 0x68, 0xd8, 0xbe, 0x7b, 0xc7, 0x0f, - 0x08, 0x25, 0x68, 0x21, 0x88, 0x3c, 0xea, 0x4e, 0x70, 0xef, 0xf6, 0xc8, 0xa5, 0x27, 0xd1, 0xd1, - 0x9d, 0x21, 0x99, 0xbc, 0x3d, 0x22, 0x23, 0xf2, 0x36, 0xef, 0x3f, 0x8a, 0x8e, 0x79, 0x8b, 0x37, - 0xf8, 0x97, 0xa0, 0x33, 0x6e, 0x41, 0xfb, 0x2b, 0x1c, 0x84, 0x2e, 0xf1, 0x4c, 0xfc, 0xab, 0x08, - 0x87, 0x14, 0x75, 0x61, 0xe1, 0xa9, 0x80, 0x74, 0x2b, 0x1b, 0x95, 0x1b, 0x0d, 0x53, 0x35, 0x8d, - 0x5f, 0x57, 0x60, 0x29, 0x46, 0x0e, 0x7d, 0xe2, 0x85, 0xb8, 0x1c, 0x1b, 0xbd, 0x0e, 0x8b, 0x52, - 0x26, 0xcb, 0xb3, 0x27, 0xb8, 0x5b, 0xe5, 0xdd, 0x4d, 0x09, 0xdb, 0xb3, 0x27, 0x18, 0xbd, 0x05, - 0x4b, 0x0a, 0x45, 0x31, 0xa9, 0x71, 0xac, 0xb6, 0x04, 0xcb, 0xd1, 0xd0, 0x1d, 0xb8, 0xa0, 0x10, - 0x6d, 0xdf, 0x8d, 0x91, 0x67, 0x39, 0xf2, 0xb2, 0xec, 0xea, 0xfb, 0xae, 0xc4, 0x37, 0x7e, 0x09, - 0x8d, 0xed, 0xbd, 0xc1, 0x16, 0xf1, 0x8e, 0xdd, 0x11, 0x13, 0x31, 0xc4, 0x01, 0xa3, 0xe9, 0x56, - 0x36, 0x6a, 0x4c, 0x44, 0xd9, 0x44, 0x3d, 0xa8, 0x87, 0xd8, 0x0e, 0x86, 0x27, 0x38, 0xec, 0x56, - 0x79, 0x57, 0xdc, 0x66, 0x54, 0xc4, 0xa7, 0x2e, 0xf1, 0xc2, 0x6e, 0x4d, 0x50, 0xc9, 0xa6, 0xf1, - 0xb7, 0x15, 0x68, 0x1e, 0x90, 
0x80, 0x3e, 0xb6, 0x7d, 0xdf, 0xf5, 0x46, 0xe8, 0x36, 0xd4, 0xf9, - 0x5a, 0x0e, 0xc9, 0x98, 0xaf, 0x41, 0x7b, 0x73, 0xf9, 0x8e, 0x14, 0xe9, 0xce, 0x81, 0xec, 0x30, - 0x63, 0x14, 0x74, 0x1d, 0xda, 0x43, 0xe2, 0x51, 0xdb, 0xf5, 0x70, 0x60, 0xf9, 0x24, 0xa0, 0x7c, - 0x65, 0xe6, 0xcc, 0x56, 0x0c, 0x65, 0xcc, 0xd1, 0x65, 0x68, 0x9c, 0x90, 0x90, 0x0a, 0x8c, 0x1a, - 0xc7, 0xa8, 0x33, 0x00, 0xef, 0x5c, 0x83, 0x05, 0xde, 0xe9, 0xfa, 0x72, 0x0d, 0xe6, 0x59, 0x73, - 0xd7, 0x37, 0xbe, 0xab, 0xc0, 0xdc, 0x63, 0x12, 0x79, 0x34, 0x33, 0x8c, 0x4d, 0x4f, 0xa4, 0x7e, - 0xb4, 0x61, 0x6c, 0x7a, 0x92, 0x0c, 0xc3, 0x30, 0x84, 0x8a, 0xc4, 0x30, 0xac, 0xb3, 0x07, 0xf5, - 0x00, 0xdb, 0x0e, 0xf1, 0xc6, 0xa7, 0x5c, 0x84, 0xba, 0x19, 0xb7, 0x99, 0xee, 0x42, 0x3c, 0x76, - 0xbd, 0xe8, 0x99, 0x15, 0xe0, 0xb1, 0x7d, 0x84, 0xc7, 0x5c, 0x94, 0xba, 0xd9, 0x96, 0x60, 0x53, - 0x40, 0x8d, 0xaf, 0x61, 0x89, 0x29, 0x3b, 0xf4, 0xed, 0x21, 0xde, 0xe7, 0x4b, 0xc8, 0x4c, 0x83, - 0x0f, 0xea, 0x61, 0xfa, 0x2d, 0x09, 0xbe, 0xe1, 0x92, 0xd5, 0xcd, 0x26, 0x83, 0xed, 0x09, 0x10, - 0xba, 0x04, 0x75, 0x21, 0x97, 0xeb, 0x70, 0xb1, 0xea, 0x26, 0x9f, 0xf1, 0x81, 0xeb, 0xc4, 0x5d, - 0xae, 0x3f, 0x94, 0x52, 0x2d, 0x88, 0xd9, 0x0f, 0x0d, 0x03, 0x60, 0xd7, 0xa3, 0xef, 0xff, 0xe4, - 0x2b, 0x7b, 0x1c, 0x61, 0xb4, 0x02, 0x73, 0x4f, 0xd9, 0x07, 0xe7, 0x5f, 0x33, 0x45, 0xc3, 0xf8, - 0x8f, 0x2a, 0x5c, 0x7e, 0xc4, 0x04, 0x1c, 0xd8, 0x9e, 0x73, 0x44, 0x9e, 0x0d, 0xf0, 0x30, 0x0a, - 0x5c, 0x7a, 0xba, 0x45, 0x3c, 0x8a, 0x9f, 0x51, 0xb4, 0x03, 0xcb, 0x9e, 0x92, 0xd7, 0x52, 0x26, - 0xc0, 0x38, 0x34, 0x37, 0xbb, 0xb1, 0x5e, 0x33, 0x33, 0x32, 0x3b, 0x5e, 0x1a, 0x10, 0xa2, 0xcf, - 0x92, 0xf5, 0x51, 0x4c, 0xaa, 0x9c, 0xc9, 0x6a, 0xcc, 0x64, 0xb0, 0xc3, 0xe5, 0x90, 0x2c, 0xd4, - 0xba, 0x29, 0x06, 0xef, 0x02, 0xdb, 0x2b, 0x96, 0x1d, 0x5a, 0x51, 0x88, 0x03, 0x3e, 0xd3, 0xe6, - 0xe6, 0x85, 0x98, 0x38, 0x99, 0xa7, 0xd9, 0x08, 0x22, 0xaf, 0x1f, 0x1e, 0x86, 0x38, 0xe0, 0x3b, - 0x4a, 0x6a, 0xc8, 0x0a, 0x08, 0xa1, 0xc7, 0xa1, 0xd2, 0x8a, 0x02, 
0x9b, 0x1c, 0x8a, 0xde, 0x86, - 0x0b, 0x61, 0xe4, 0xfb, 0x63, 0x3c, 0xc1, 0x1e, 0xb5, 0xc7, 0xd6, 0x28, 0x20, 0x91, 0x1f, 0x76, - 0xe7, 0x36, 0x6a, 0x37, 0x6a, 0x26, 0xd2, 0xbb, 0xbe, 0xe0, 0x3d, 0x68, 0x1d, 0xc0, 0x0f, 0xdc, - 0xa7, 0xee, 0x18, 0x8f, 0xb0, 0xd3, 0x9d, 0xe7, 0x4c, 0x35, 0x88, 0xf1, 0x5d, 0x15, 0x2e, 0xf2, - 0xe9, 0x1c, 0x10, 0x47, 0xae, 0xac, 0xdc, 0x7f, 0xd7, 0xa0, 0x35, 0xe4, 0xec, 0x2d, 0xdf, 0x0e, - 0xb0, 0x47, 0xa5, 0x21, 0x2e, 0x0a, 0xe0, 0x01, 0x87, 0xa1, 0x7d, 0xe8, 0x84, 0x52, 0x11, 0xd6, - 0x50, 0x68, 0x42, 0xae, 0xd7, 0x1b, 0xf1, 0x94, 0xa7, 0x68, 0xcd, 0x5c, 0x0a, 0x73, 0x6a, 0x5c, - 0x08, 0x4f, 0xc3, 0x21, 0x1d, 0x8b, 0xfd, 0xdb, 0xdc, 0xfc, 0x51, 0x9a, 0x4f, 0x56, 0xcc, 0x3b, - 0x03, 0x81, 0xbd, 0xe3, 0xd1, 0xe0, 0xd4, 0x54, 0xb4, 0xbd, 0x7b, 0xb0, 0xa8, 0x77, 0xa0, 0x0e, - 0xd4, 0xbe, 0xc1, 0xa7, 0x72, 0x0a, 0xec, 0x33, 0xb1, 0x32, 0xb1, 0x7b, 0x44, 0xe3, 0x5e, 0xf5, - 0x4f, 0x2a, 0x46, 0x00, 0x28, 0x19, 0xe5, 0x31, 0xa6, 0xb6, 0x63, 0x53, 0x1b, 0x21, 0x98, 0xe5, - 0xfe, 0x50, 0xb0, 0xe0, 0xdf, 0x8c, 0x6b, 0x24, 0x0d, 0xbd, 0x61, 0xb2, 0x4f, 0x74, 0x05, 0x1a, - 0xb1, 0x49, 0x49, 0xa7, 0x98, 0x00, 0x98, 0x73, 0xb2, 0x29, 0xc5, 0x13, 0x9f, 0x72, 0xf5, 0xb6, - 0x4c, 0xd5, 0x34, 0xfe, 0x79, 0x16, 0x3a, 0x39, 0x0d, 0x7c, 0x00, 0xf5, 0x89, 0x1c, 0x5e, 0x5a, - 0xf2, 0xe5, 0xc4, 0x43, 0xe5, 0x24, 0x34, 0x63, 0x64, 0xe6, 0x00, 0xd8, 0xd6, 0xd2, 0xfc, 0x77, - 0xdc, 0x66, 0x6a, 0x1d, 0x93, 0x91, 0xe5, 0xb8, 0x01, 0x1e, 0x52, 0x12, 0x9c, 0x4a, 0x29, 0x17, - 0xc7, 0x64, 0xb4, 0xad, 0x60, 0xe8, 0x1d, 0x00, 0xc7, 0x0b, 0x99, 0x46, 0x8f, 0xdd, 0x11, 0x97, - 0xb5, 0xb9, 0x89, 0xe2, 0xb1, 0x63, 0x1f, 0x6d, 0x36, 0x1c, 0x2f, 0x94, 0xc2, 0x7e, 0x08, 0x2d, - 0xe6, 0xf3, 0xac, 0x89, 0x70, 0xaf, 0xc2, 0x26, 0x9b, 0x9b, 0x2b, 0x9a, 0xc4, 0xb1, 0xef, 0x35, - 0x17, 0xfd, 0xa4, 0x11, 0xa2, 0x4f, 0x60, 0x9e, 0xfb, 0x9c, 0xb0, 0x3b, 0xcf, 0x69, 0xae, 0x17, - 0xcc, 0x52, 0x6a, 0xfb, 0x11, 0xc7, 0x13, 0xca, 0x96, 0x44, 0xe8, 0x11, 0x34, 0x6d, 0xcf, 0x23, - 0xd4, 
0x16, 0xdb, 0x75, 0x81, 0xf3, 0xb8, 0x55, 0xce, 0xa3, 0x9f, 0x20, 0x0b, 0x46, 0x3a, 0x39, - 0xfa, 0x09, 0xcc, 0xf1, 0xfd, 0xdc, 0xad, 0xf3, 0x59, 0xaf, 0x4f, 0x37, 0x3f, 0x53, 0x20, 0xf7, - 0x3e, 0x84, 0xa6, 0x26, 0xda, 0x79, 0xcc, 0xad, 0xf7, 0x29, 0x74, 0xb2, 0x12, 0x9d, 0xcb, 0x5c, - 0x77, 0x61, 0xc5, 0x8c, 0xbc, 0x44, 0x30, 0x15, 0x10, 0xbc, 0x03, 0xf3, 0x52, 0x7f, 0xc2, 0x76, - 0x2e, 0x95, 0xae, 0x88, 0x29, 0x11, 0x8d, 0x4f, 0xe0, 0x62, 0x86, 0x95, 0x0c, 0x17, 0xde, 0x80, - 0xb6, 0x4f, 0x1c, 0x2b, 0x14, 0x60, 0xcb, 0x75, 0x94, 0x33, 0xf0, 0x63, 0xdc, 0x5d, 0x87, 0x91, - 0x0f, 0x28, 0xf1, 0xf3, 0xa2, 0x9c, 0x8d, 0xbc, 0x0b, 0xab, 0x59, 0x72, 0x31, 0xbc, 0xf1, 0x19, - 0xac, 0x99, 0x78, 0x42, 0x9e, 0xe2, 0x97, 0x65, 0xdd, 0x83, 0x6e, 0x9e, 0x41, 0xc2, 0x3c, 0x81, - 0x0e, 0xa8, 0x4d, 0xa3, 0xf0, 0x7c, 0xcc, 0x6f, 0xea, 0x0c, 0xe4, 0x41, 0x28, 0xf8, 0xa0, 0x36, - 0x54, 0x5d, 0x5f, 0x12, 0x55, 0x5d, 0xdf, 0xf8, 0x0c, 0x1a, 0xf1, 0x11, 0x84, 0x36, 0x93, 0x50, - 0xa5, 0xfa, 0x82, 0x73, 0x2a, 0x0e, 0x62, 0x1e, 0xe6, 0xbc, 0xb5, 0x1c, 0x69, 0x13, 0x20, 0xf6, - 0x33, 0xea, 0xdc, 0x43, 0x79, 0x7e, 0xa6, 0x86, 0x65, 0xfc, 0x7d, 0xca, 0xe9, 0x68, 0x22, 0x3b, - 0xb1, 0xc8, 0x4e, 0xca, 0x09, 0x55, 0xcf, 0xe3, 0x84, 0xee, 0xc0, 0x5c, 0x48, 0x6d, 0x2a, 0xdc, - 0x60, 0x5b, 0x9b, 0x5c, 0x7a, 0x48, 0x6c, 0x0a, 0x34, 0x74, 0x15, 0x60, 0x18, 0x60, 0x9b, 0x62, - 0xc7, 0xb2, 0x85, 0x7f, 0xac, 0x99, 0x0d, 0x09, 0xe9, 0x53, 0x74, 0x0f, 0x16, 0x54, 0xdc, 0x31, - 0xc7, 0xc5, 0xd8, 0x28, 0x60, 0x98, 0x5a, 0x7d, 0x53, 0x11, 0x24, 0x7b, 0x7a, 0x7e, 0xfa, 0x9e, - 0x96, 0x74, 0x02, 0x59, 0x73, 0x4b, 0x0b, 0xa5, 0x6e, 0x49, 0x50, 0x9c, 0xc5, 0x2d, 0xd5, 0x4b, - 0xdd, 0x92, 0xe4, 0x31, 0xd5, 0x2d, 0xfd, 0x31, 0x1d, 0xcc, 0x63, 0xe8, 0xe6, 0x37, 0x88, 0x74, - 0x0c, 0xef, 0xc0, 0x7c, 0xc8, 0x21, 0x53, 0x9c, 0x8c, 0x24, 0x91, 0x88, 0xc6, 0x03, 0x58, 0xc9, - 0x58, 0x80, 0x08, 0xfb, 0x62, 0x7b, 0xa9, 0x9c, 0xc9, 0x5e, 0x8c, 0xff, 0xaf, 0xe8, 0xd6, 0xfb, - 0xc0, 0x1d, 0x53, 0x1c, 0xe4, 0xac, 0xf7, 
0x5d, 0xc5, 0x54, 0x98, 0xee, 0xd5, 0x32, 0xa6, 0x22, - 0x22, 0x93, 0x96, 0x38, 0x80, 0x36, 0xd7, 0xa1, 0x15, 0xe2, 0x31, 0x3f, 0x10, 0x65, 0x28, 0xf2, - 0xe3, 0x02, 0x6a, 0x31, 0xae, 0x30, 0x80, 0x81, 0x44, 0x17, 0xea, 0x6b, 0x8d, 0x75, 0x58, 0xef, - 0x3e, 0xa0, 0x3c, 0xd2, 0xb9, 0xf4, 0xf0, 0x25, 0xdb, 0xfb, 0x2c, 0x93, 0x28, 0xf0, 0xf4, 0xc7, - 0x5c, 0x8c, 0x29, 0x4a, 0x10, 0x72, 0x9a, 0x12, 0xd1, 0xf8, 0xbb, 0x1a, 0x40, 0xd2, 0xf9, 0xca, - 0x6e, 0xfa, 0x0f, 0xe2, 0x2d, 0x28, 0xa2, 0x89, 0xd7, 0x0a, 0xf8, 0x15, 0x6e, 0xbe, 0x07, 0xe9, - 0xcd, 0x27, 0xe2, 0x8a, 0x37, 0x8a, 0xa8, 0x5f, 0xd9, 0x6d, 0xb7, 0x05, 0xab, 0x59, 0x75, 0xcb, - 0x4d, 0x77, 0x13, 0xe6, 0x5c, 0x8a, 0x27, 0x22, 0x2f, 0xd6, 0x93, 0x0b, 0x0d, 0x57, 0x60, 0x18, - 0xaf, 0x43, 0x63, 0x77, 0x62, 0x8f, 0xf0, 0xc0, 0xc7, 0x43, 0x36, 0x96, 0xcb, 0x1a, 0x72, 0x7c, - 0xd1, 0x30, 0x36, 0xa1, 0xfe, 0x10, 0x9f, 0x8a, 0x3d, 0x78, 0x46, 0xf9, 0x8c, 0x7f, 0xad, 0xc0, - 0x1a, 0xf7, 0x9d, 0x5b, 0x2a, 0x2b, 0x35, 0x71, 0x48, 0xa2, 0x60, 0x88, 0x43, 0xae, 0x52, 0x3f, - 0xb2, 0x7c, 0x1c, 0xb8, 0xc4, 0x91, 0x39, 0x5c, 0x63, 0xe8, 0x47, 0x07, 0x1c, 0xc0, 0x32, 0x57, - 0xd6, 0xfd, 0xab, 0x88, 0x48, 0xdb, 0xaa, 0x99, 0xf5, 0xa1, 0x1f, 0xfd, 0x94, 0xb5, 0x15, 0x6d, - 0x78, 0x62, 0x07, 0x38, 0xe4, 0x36, 0x24, 0x68, 0x07, 0x1c, 0x80, 0xde, 0x81, 0x8b, 0x13, 0x3c, - 0x21, 0xc1, 0xa9, 0x35, 0x76, 0x27, 0x2e, 0xb5, 0x5c, 0xcf, 0x3a, 0x3a, 0xa5, 0x38, 0x94, 0x86, - 0x83, 0x44, 0xe7, 0x23, 0xd6, 0xb7, 0xeb, 0x7d, 0xce, 0x7a, 0x90, 0x01, 0x2d, 0x42, 0x26, 0x56, - 0x38, 0x24, 0x01, 0xb6, 0x6c, 0xe7, 0x6b, 0x7e, 0x78, 0xd4, 0xcc, 0x26, 0x21, 0x93, 0x01, 0x83, - 0xf5, 0x9d, 0xaf, 0x0d, 0x1b, 0x5a, 0xa9, 0x9c, 0x8e, 0xc5, 0xfa, 0x3c, 0x79, 0x93, 0xb1, 0x3e, - 0xfb, 0x66, 0xb0, 0x80, 0x8c, 0xd5, 0x3a, 0xf0, 0x6f, 0x06, 0xa3, 0xa7, 0xbe, 0x0a, 0xf4, 0xf9, - 0x37, 0x5b, 0xb0, 0x31, 0x7e, 0x2a, 0xd3, 0xea, 0x86, 0x29, 0x1a, 0x86, 0x03, 0xb0, 0x65, 0xfb, - 0xf6, 0x91, 0x3b, 0x76, 0xe9, 0x29, 0xba, 0x09, 0x1d, 0xdb, 0x71, 0xac, 0xa1, 
0x82, 0xb8, 0x58, - 0xd5, 0x38, 0x96, 0x6c, 0xc7, 0xd9, 0xd2, 0xc0, 0xe8, 0x47, 0xb0, 0xec, 0x04, 0xc4, 0x4f, 0xe3, - 0x8a, 0xa2, 0x47, 0x87, 0x75, 0xe8, 0xc8, 0xc6, 0xf3, 0x1a, 0x5c, 0x4d, 0xab, 0x25, 0x9b, 0x25, - 0x7f, 0x00, 0x8b, 0x99, 0x51, 0xd3, 0xe9, 0x69, 0x22, 0xa4, 0x99, 0x42, 0xcc, 0xe4, 0x91, 0xd5, - 0x6c, 0x1e, 0x59, 0x9c, 0x7e, 0xd7, 0x7e, 0x88, 0xf4, 0x7b, 0xf6, 0xfb, 0xa4, 0xdf, 0x73, 0x67, - 0x4a, 0xbf, 0xdf, 0xe4, 0x05, 0x2d, 0x45, 0xc4, 0xd3, 0xa6, 0x79, 0x51, 0x75, 0x89, 0x71, 0x3c, - 0x55, 0xf8, 0xca, 0xa4, 0xe9, 0x0b, 0xe7, 0x49, 0xd3, 0xeb, 0xa5, 0x69, 0x3a, 0xb3, 0x08, 0xdf, - 0xb7, 0x83, 0x09, 0x09, 0x2c, 0x3f, 0x20, 0xc7, 0xee, 0x18, 0x77, 0x1b, 0x5c, 0x84, 0x25, 0x05, - 0x3f, 0x10, 0x60, 0xe3, 0x1f, 0x2a, 0xb0, 0x92, 0x56, 0xb2, 0xcc, 0xc0, 0x3e, 0x85, 0x46, 0xa0, - 0x76, 0xa1, 0x54, 0xec, 0x46, 0x3a, 0xd2, 0xc9, 0xef, 0x56, 0x33, 0x21, 0x41, 0x3f, 0x2d, 0xcd, - 0xe5, 0xdf, 0x2c, 0x61, 0xf3, 0xa2, 0x6c, 0xde, 0xe8, 0xc3, 0x72, 0x8c, 0x3c, 0x35, 0x93, 0xd6, - 0x32, 0xe3, 0x6a, 0x3a, 0x33, 0xf6, 0x60, 0x7e, 0x1b, 0x3f, 0x75, 0x87, 0xf8, 0x07, 0x29, 0x8d, - 0x6d, 0x40, 0xd3, 0xc7, 0xc1, 0xc4, 0x0d, 0xc3, 0xd8, 0x40, 0x1b, 0xa6, 0x0e, 0x32, 0xfe, 0x7b, - 0x0e, 0x96, 0xb2, 0x2b, 0xfb, 0x7e, 0x2e, 0x11, 0xef, 0x25, 0x3b, 0x26, 0x3b, 0x3f, 0xed, 0x34, - 0xbc, 0xa1, 0x1c, 0x6e, 0x35, 0x13, 0x8f, 0xc7, 0x3e, 0x59, 0x3a, 0x61, 0x36, 0xff, 0x21, 0x99, - 0x4c, 0x6c, 0xcf, 0x51, 0x65, 0x4b, 0xd9, 0x64, 0xab, 0x65, 0x07, 0x23, 0xb6, 0x0d, 0x18, 0x98, - 0x7f, 0xa3, 0xd7, 0xa0, 0xc9, 0xe2, 0x5a, 0xd7, 0xe3, 0x79, 0x3c, 0x37, 0xf2, 0x86, 0x09, 0x12, - 0xb4, 0xed, 0x06, 0xe8, 0x3a, 0xcc, 0x62, 0xef, 0xa9, 0x3a, 0xf7, 0x92, 0xba, 0xa6, 0x72, 0xf4, - 0x26, 0xef, 0x46, 0x6f, 0xc2, 0xfc, 0x84, 0x44, 0x1e, 0x55, 0x11, 0x6e, 0x3b, 0x46, 0xe4, 0xc5, - 0x48, 0x53, 0xf6, 0xa2, 0x9b, 0xb0, 0xe0, 0x70, 0x1d, 0xa8, 0x30, 0x76, 0x29, 0xa9, 0x05, 0x70, - 0xb8, 0xa9, 0xfa, 0xd1, 0xc7, 0xf1, 0x89, 0xdd, 0xc8, 0x9c, 0xb9, 0x99, 0x45, 0x2d, 0x3c, 0xb6, - 0x1f, 0xa6, 0x8f, 
0x6d, 0xe0, 0x2c, 0x6e, 0x96, 0xb2, 0x98, 0x9e, 0xc9, 0x5f, 0x82, 0xfa, 0x98, - 0x8c, 0x84, 0x1d, 0x34, 0x45, 0x91, 0x7b, 0x4c, 0x46, 0xdc, 0x0c, 0x56, 0x58, 0x98, 0xe2, 0xb8, - 0x5e, 0x77, 0x91, 0x6f, 0x5f, 0xd1, 0x60, 0xa7, 0x0f, 0xff, 0xb0, 0x88, 0x37, 0xc4, 0xdd, 0x16, - 0xef, 0x6a, 0x70, 0xc8, 0xbe, 0x37, 0xe4, 0x87, 0x23, 0xa5, 0xa7, 0xdd, 0x36, 0x87, 0xb3, 0x4f, - 0x16, 0x5d, 0x8a, 0xbc, 0x62, 0x29, 0x13, 0x5d, 0x16, 0xed, 0xcf, 0x57, 0xa0, 0x54, 0xf0, 0x4f, - 0x15, 0x58, 0xdd, 0xe2, 0xc1, 0x95, 0xe6, 0x09, 0xce, 0x91, 0xea, 0xa2, 0xbb, 0x71, 0x4d, 0x21, - 0x9b, 0xb1, 0x66, 0x27, 0x2b, 0xf1, 0xd0, 0x7d, 0x68, 0x2b, 0x9e, 0x92, 0xb2, 0xf6, 0xa2, 0x6a, - 0x44, 0x2b, 0xd4, 0x9b, 0xc6, 0xc7, 0xb0, 0x96, 0x93, 0x59, 0x06, 0x42, 0xaf, 0xc3, 0x62, 0xe2, - 0x11, 0x62, 0x91, 0x9b, 0x31, 0x6c, 0xd7, 0x31, 0xee, 0xc1, 0xc5, 0x01, 0xb5, 0x03, 0x9a, 0x9b, - 0xf0, 0x19, 0x68, 0x79, 0x41, 0x22, 0x4d, 0x2b, 0x6b, 0x06, 0x03, 0x58, 0x19, 0x50, 0xe2, 0xbf, - 0x04, 0x53, 0xb6, 0xd3, 0xd9, 0xb4, 0x49, 0x44, 0x65, 0xf4, 0xa3, 0x9a, 0xc6, 0x9a, 0x28, 0x9f, - 0xe4, 0x47, 0xfb, 0x08, 0x56, 0x45, 0xf5, 0xe2, 0x65, 0x26, 0x71, 0x49, 0xd5, 0x4e, 0xf2, 0x7c, - 0xb7, 0xe1, 0x42, 0xe2, 0xca, 0x93, 0x44, 0xec, 0x76, 0x3a, 0x11, 0x5b, 0xcb, 0xeb, 0x38, 0x95, - 0x87, 0xfd, 0x4d, 0x55, 0x73, 0x98, 0x25, 0x69, 0xd8, 0x66, 0x3a, 0x0d, 0xbb, 0x52, 0xc2, 0x32, - 0x95, 0x85, 0xe5, 0x2d, 0xb2, 0x56, 0x60, 0x91, 0x66, 0x2e, 0x57, 0x9b, 0xcd, 0x94, 0x8d, 0x33, - 0xb2, 0xfd, 0x41, 0x52, 0xb5, 0x5d, 0x91, 0xaa, 0xc5, 0x43, 0xc7, 0x15, 0xa5, 0xbb, 0x99, 0x54, - 0xad, 0x5b, 0x26, 0x66, 0x9c, 0xa9, 0xfd, 0xf5, 0x2c, 0x34, 0xe2, 0xbe, 0xdc, 0xc2, 0xe6, 0x17, - 0xa9, 0x5a, 0xb0, 0x48, 0xfa, 0xf9, 0x55, 0x7b, 0x99, 0xf3, 0x6b, 0xf6, 0x45, 0xe7, 0xd7, 0x65, - 0x68, 0xf0, 0x0f, 0x2b, 0xc0, 0xc7, 0xf2, 0x3c, 0xaa, 0x73, 0x80, 0x89, 0x8f, 0x13, 0x83, 0x9a, - 0x3f, 0x8b, 0x41, 0x65, 0x72, 0xc2, 0x85, 0x6c, 0x4e, 0xf8, 0x7e, 0x7c, 0xc2, 0x88, 0xb3, 0x68, - 0x3d, 0xcf, 0xae, 0xf0, 0x6c, 0xd9, 0x49, 0x9f, 0x2d, 
0xe2, 0x78, 0xba, 0x56, 0x40, 0xfc, 0xca, - 0x66, 0x84, 0x8f, 0x44, 0x46, 0xa8, 0x5b, 0x95, 0x74, 0x84, 0x9b, 0x00, 0xf1, 0x9e, 0x57, 0x69, - 0x21, 0xca, 0x4f, 0xcd, 0xd4, 0xb0, 0x98, 0x57, 0x49, 0xad, 0x7f, 0x52, 0xf6, 0x3c, 0x83, 0x57, - 0xf9, 0x17, 0x3d, 0x4a, 0x2a, 0xa9, 0x1c, 0xbe, 0x9f, 0x2b, 0x22, 0x9c, 0xcd, 0xea, 0x6e, 0xa7, - 0x6b, 0x08, 0xe7, 0x33, 0x97, 0x5c, 0x09, 0x81, 0x1f, 0xea, 0x76, 0x20, 0xbb, 0x45, 0xf6, 0xd7, - 0x90, 0x90, 0x3e, 0x65, 0xa1, 0xd4, 0xb1, 0xeb, 0xb9, 0xe1, 0x89, 0xe8, 0x9f, 0xe7, 0xfd, 0xa0, - 0x40, 0x7d, 0x7e, 0xa1, 0x8b, 0x9f, 0xb9, 0xd4, 0x1a, 0x12, 0x07, 0x73, 0x63, 0x9c, 0x33, 0xeb, - 0x0c, 0xb0, 0x45, 0x1c, 0x9c, 0x6c, 0x90, 0xfa, 0xb9, 0x36, 0x48, 0x23, 0xb3, 0x41, 0x56, 0x61, - 0x3e, 0xc0, 0x76, 0x48, 0xbc, 0x2e, 0x88, 0x6b, 0x61, 0xd1, 0x62, 0x67, 0xc5, 0x04, 0x87, 0x21, - 0x1b, 0x40, 0x06, 0x30, 0xb2, 0xa9, 0x85, 0x59, 0x8b, 0x65, 0x61, 0xd6, 0x94, 0xd2, 0x64, 0x26, - 0xcc, 0x6a, 0x95, 0x85, 0x59, 0x67, 0xa9, 0x4c, 0x6a, 0x41, 0x64, 0x7b, 0x6a, 0x10, 0xa9, 0x87, - 0x63, 0x4b, 0xa9, 0x70, 0xec, 0x8f, 0xb9, 0xa7, 0x1e, 0xc2, 0x5a, 0x6e, 0x17, 0xc8, 0x4d, 0x75, - 0x37, 0x53, 0xdb, 0xec, 0x96, 0x2d, 0x50, 0x5c, 0xda, 0xfc, 0x73, 0x58, 0xda, 0x79, 0x86, 0x87, - 0x83, 0x53, 0x6f, 0x78, 0x8e, 0x88, 0xa0, 0x03, 0xb5, 0xe1, 0xc4, 0x91, 0x49, 0x3d, 0xfb, 0xd4, - 0x63, 0x84, 0x5a, 0x3a, 0x46, 0xb0, 0xa0, 0x93, 0x8c, 0x20, 0xe5, 0x5c, 0x65, 0x72, 0x3a, 0x0c, - 0x99, 0x31, 0x5f, 0x34, 0x65, 0x4b, 0xc2, 0x71, 0x10, 0xf0, 0x59, 0x0b, 0x38, 0x0e, 0x82, 0xb4, - 0x45, 0xd7, 0xd2, 0x16, 0x6d, 0x7c, 0x0d, 0x4d, 0x36, 0xc0, 0xf7, 0x12, 0x5f, 0x06, 0xca, 0xb5, - 0x24, 0x50, 0x8e, 0xe3, 0xed, 0x59, 0x2d, 0xde, 0x36, 0x36, 0x60, 0x51, 0x8c, 0x25, 0x27, 0xd2, - 0x81, 0x5a, 0x14, 0x8c, 0x95, 0xde, 0xa2, 0x60, 0x6c, 0xfc, 0x29, 0xb4, 0xfa, 0x94, 0xda, 0xc3, - 0x93, 0x73, 0xc8, 0x13, 0x8f, 0x55, 0xd5, 0x63, 0xfb, 0x9c, 0x4c, 0x86, 0x01, 0x6d, 0xc5, 0xbb, - 0x74, 0xfc, 0x3d, 0x40, 0x07, 0x24, 0xa0, 0x0f, 0x48, 0xf0, 0xad, 0x1d, 0x38, 0xe7, 0x8b, 
0x95, - 0x11, 0xcc, 0xca, 0x67, 0x22, 0xb5, 0x1b, 0x73, 0x26, 0xff, 0x36, 0xde, 0x82, 0x0b, 0x29, 0x7e, - 0xa5, 0x03, 0x7f, 0x00, 0x4d, 0xee, 0x42, 0x64, 0x3c, 0x75, 0x43, 0xaf, 0xdc, 0x4d, 0xf3, 0x33, - 0x2c, 0xe3, 0x66, 0x67, 0x04, 0x87, 0xc7, 0x0e, 0xfd, 0xc7, 0x99, 0xa8, 0x63, 0x25, 0x4d, 0x9f, - 0x89, 0x38, 0xfe, 0xb1, 0x02, 0x73, 0x1c, 0x9e, 0xf3, 0xe8, 0x97, 0xa1, 0x11, 0x60, 0x9f, 0x58, - 0xd4, 0x1e, 0xc5, 0x2f, 0x6f, 0x18, 0xe0, 0x89, 0x3d, 0x0a, 0xf9, 0xc3, 0x21, 0xd6, 0xe9, 0xb8, - 0x23, 0x1c, 0x52, 0xf5, 0xfc, 0xa6, 0xc9, 0x60, 0xdb, 0x02, 0xc4, 0x96, 0x24, 0x74, 0xff, 0x42, - 0x84, 0x13, 0xb3, 0x26, 0xff, 0x46, 0xd7, 0xc5, 0x1d, 0xfa, 0x94, 0x42, 0x0d, 0xbf, 0x58, 0xef, - 0x41, 0x3d, 0x53, 0x9b, 0x89, 0xdb, 0xc6, 0xc7, 0x80, 0xf4, 0x39, 0xcb, 0x45, 0x7d, 0x13, 0xe6, - 0xf9, 0x92, 0xa8, 0xf3, 0xb0, 0x9d, 0x9e, 0xb4, 0x29, 0x7b, 0x8d, 0x4f, 0x01, 0x89, 0x55, 0x4c, - 0x9d, 0x81, 0x67, 0x5f, 0xf1, 0x8f, 0xe0, 0x42, 0x8a, 0x3e, 0xbe, 0x32, 0x4d, 0x31, 0xc8, 0x8e, - 0x2e, 0x89, 0xff, 0xad, 0x02, 0xd0, 0x8f, 0xe8, 0x89, 0x2c, 0x34, 0xe8, 0xb3, 0xac, 0xa4, 0x67, - 0xc9, 0xfa, 0x7c, 0x3b, 0x0c, 0xbf, 0x25, 0x81, 0x0a, 0xf2, 0xe2, 0x36, 0x2f, 0x12, 0x44, 0xf4, - 0x44, 0x15, 0x22, 0xd9, 0x37, 0xba, 0x0e, 0x6d, 0xf1, 0x60, 0xca, 0xb2, 0x1d, 0x27, 0xc0, 0x61, - 0x28, 0x2b, 0x92, 0x2d, 0x01, 0xed, 0x0b, 0x20, 0x43, 0x73, 0x1d, 0xec, 0x51, 0x97, 0x9e, 0x5a, - 0x94, 0x7c, 0x83, 0x3d, 0x19, 0xbe, 0xb5, 0x14, 0xf4, 0x09, 0x03, 0x32, 0xb4, 0x00, 0x8f, 0xdc, - 0x90, 0x06, 0x0a, 0x4d, 0x55, 0xc8, 0x24, 0x94, 0xa3, 0x19, 0xbf, 0xae, 0x40, 0xe7, 0x20, 0x1a, - 0x8f, 0xc5, 0x24, 0xcf, 0xbb, 0x96, 0xe8, 0x2d, 0x39, 0x8f, 0x6a, 0xc6, 0x1a, 0x92, 0x25, 0x92, - 0x93, 0xfb, 0xfe, 0x69, 0xe5, 0x5d, 0x58, 0xd6, 0x04, 0x95, 0x4a, 0x4b, 0x9d, 0xd2, 0x95, 0xf4, - 0x29, 0xcd, 0x0c, 0x45, 0x64, 0x52, 0x2f, 0x37, 0x39, 0xe3, 0x22, 0x5c, 0x48, 0xd1, 0xcb, 0x2c, - 0xec, 0x16, 0xb4, 0xe4, 0xb5, 0xa5, 0x34, 0x82, 0x4b, 0x50, 0x67, 0xee, 0x65, 0xe8, 0x3a, 0xaa, - 0x02, 0xbd, 0xe0, 0x13, 0x67, 
0xcb, 0x75, 0x02, 0x63, 0x0f, 0x5a, 0xa6, 0x60, 0x2f, 0x71, 0x3f, - 0x81, 0xb6, 0xbc, 0xe4, 0xb4, 0x52, 0x97, 0xfd, 0x49, 0xb9, 0x34, 0xc5, 0xdb, 0x6c, 0x79, 0x7a, - 0xd3, 0xf8, 0x25, 0xf4, 0x0e, 0x7d, 0x87, 0x05, 0x53, 0x3a, 0x57, 0x35, 0xb5, 0x4f, 0x40, 0x3d, - 0xe8, 0x2b, 0x63, 0x9e, 0x26, 0x6b, 0x05, 0x7a, 0xd3, 0xb8, 0x0a, 0x97, 0x0b, 0x99, 0xcb, 0x79, - 0xfb, 0xd0, 0x49, 0x3a, 0x1c, 0x57, 0x15, 0xde, 0x79, 0x41, 0xbd, 0xa2, 0x15, 0xd4, 0x57, 0xe3, - 0x63, 0x58, 0x38, 0x74, 0xd9, 0xd2, 0x82, 0xa6, 0x5a, 0x59, 0xd0, 0x34, 0x9b, 0x0a, 0x9a, 0x8c, - 0x2f, 0xe3, 0xd5, 0x93, 0x11, 0xeb, 0x87, 0x3c, 0x6c, 0x16, 0x63, 0x2b, 0x37, 0x71, 0xa9, 0x60, - 0x72, 0x02, 0xc3, 0xd4, 0x90, 0x8d, 0x25, 0x68, 0xa5, 0x1c, 0x86, 0x71, 0x1f, 0xda, 0x19, 0x0f, - 0x70, 0x27, 0x13, 0x3f, 0xe4, 0x96, 0x2d, 0x13, 0x3d, 0xac, 0x48, 0x47, 0xf4, 0x20, 0xdc, 0xf5, - 0x8e, 0x89, 0xe2, 0x7b, 0x0d, 0x9a, 0x87, 0x65, 0x8f, 0xe3, 0x66, 0xd5, 0x7d, 0xcc, 0x5b, 0xb0, - 0x3c, 0xa0, 0x24, 0xb0, 0x47, 0x78, 0x97, 0xef, 0xda, 0x63, 0x57, 0xdc, 0x58, 0x44, 0x51, 0xec, - 0xbf, 0xf9, 0xb7, 0xf1, 0x9f, 0x15, 0x58, 0x7a, 0xe0, 0x8e, 0x71, 0x78, 0x1a, 0x52, 0x3c, 0x39, - 0xe4, 0xb1, 0xe4, 0x15, 0x68, 0x30, 0x69, 0x42, 0x6a, 0x4f, 0x7c, 0x75, 0x5f, 0x13, 0x03, 0xd8, - 0x1a, 0x85, 0x82, 0xb5, 0xca, 0x2e, 0xf5, 0x38, 0x3e, 0x37, 0x2a, 0x8b, 0xad, 0x25, 0x08, 0xbd, - 0x0b, 0x10, 0x85, 0xd8, 0x91, 0x77, 0x34, 0xb5, 0xcc, 0xd1, 0x73, 0xa8, 0xd7, 0xe2, 0x19, 0x9e, - 0xb8, 0xb0, 0x79, 0x0f, 0x9a, 0xae, 0x47, 0x1c, 0xcc, 0x6b, 0xf1, 0x8e, 0xcc, 0x3c, 0x8b, 0xa9, - 0x40, 0x20, 0x1e, 0x86, 0xd8, 0x31, 0xfe, 0x4c, 0x7a, 0x61, 0xb5, 0x78, 0x52, 0x07, 0x3b, 0xb0, - 0x2c, 0x36, 0xf4, 0x71, 0x3c, 0x69, 0xa5, 0xe8, 0x24, 0x9c, 0xcb, 0x2c, 0x88, 0xd9, 0x71, 0xe5, - 0xa9, 0xa8, 0x28, 0x8c, 0x7b, 0x70, 0x31, 0x15, 0xf3, 0x9d, 0x27, 0x55, 0xfa, 0x22, 0x93, 0x67, - 0x25, 0x06, 0x22, 0x13, 0x1d, 0x65, 0x1f, 0x25, 0x89, 0x4e, 0x28, 0x12, 0x9d, 0xd0, 0x30, 0xe1, - 0x52, 0x2a, 0xfd, 0x4b, 0x09, 0xf2, 0x5e, 0xe6, 0x88, 0xbf, 0x5a, 
0xc2, 0x2c, 0x73, 0xd6, 0xff, - 0x6f, 0x05, 0x56, 0x8a, 0x10, 0x5e, 0xb2, 0xd0, 0xf0, 0xb3, 0x92, 0x9b, 0xf3, 0xbb, 0x53, 0xa5, - 0xf9, 0x83, 0x94, 0x64, 0x1e, 0x42, 0xaf, 0x68, 0xf5, 0xf2, 0xaa, 0xa8, 0x9d, 0x41, 0x15, 0xff, - 0x57, 0xd5, 0x4a, 0x67, 0x7d, 0x4a, 0x03, 0xf7, 0x28, 0x62, 0xc6, 0xfb, 0x43, 0xa5, 0xc0, 0xf7, - 0xe3, 0xf4, 0x4e, 0xac, 0xdf, 0x8d, 0x3c, 0x55, 0x32, 0x6a, 0x61, 0x8a, 0xb7, 0x9f, 0x4e, 0xf1, - 0x44, 0x51, 0xec, 0xf6, 0x54, 0x36, 0xaf, 0x6c, 0xdd, 0xe3, 0x79, 0x05, 0xda, 0x69, 0x3d, 0xa0, - 0x8f, 0x01, 0xec, 0x58, 0x72, 0x69, 0xf2, 0x57, 0xa6, 0xcd, 0xce, 0xd4, 0xf0, 0xd1, 0x35, 0xa8, - 0x0d, 0xfd, 0x48, 0x6a, 0x24, 0xb9, 0x1d, 0xd9, 0xf2, 0x23, 0xe1, 0x00, 0x58, 0x2f, 0x0b, 0x9a, - 0xc5, 0x7d, 0x72, 0xce, 0x73, 0x3d, 0xe6, 0x60, 0x81, 0x2a, 0x71, 0xd0, 0x67, 0xd0, 0xfe, 0x36, - 0x70, 0xa9, 0x7d, 0x34, 0xc6, 0xd6, 0xd8, 0x3e, 0xc5, 0x81, 0xf4, 0x5c, 0xe5, 0x5e, 0xa6, 0xa5, - 0xf0, 0x1f, 0x31, 0x74, 0x23, 0x82, 0xba, 0x1a, 0xff, 0x05, 0x1e, 0xf9, 0x21, 0xac, 0x45, 0x0c, - 0xcd, 0xe2, 0x77, 0xda, 0x9e, 0xed, 0x11, 0x2b, 0xc4, 0xec, 0x68, 0x52, 0xef, 0xc8, 0x8a, 0xbd, - 0xe5, 0x0a, 0x27, 0xda, 0x22, 0x01, 0xde, 0xb3, 0x3d, 0x32, 0x10, 0x14, 0xc6, 0x04, 0x9a, 0xda, - 0x74, 0x5e, 0x30, 0xf2, 0x7d, 0x58, 0x56, 0xf7, 0x4e, 0x21, 0xa6, 0xd2, 0xaf, 0x4f, 0x1b, 0x73, - 0x49, 0xa2, 0x0f, 0x30, 0xe5, 0xde, 0xfd, 0xd6, 0x15, 0xa8, 0xab, 0xb7, 0xf5, 0x68, 0x01, 0x6a, - 0x4f, 0xb6, 0x0e, 0x3a, 0x33, 0xec, 0xe3, 0x70, 0xfb, 0xa0, 0x53, 0xb9, 0x75, 0x0f, 0x96, 0x32, - 0xef, 0x44, 0xd0, 0x32, 0xb4, 0x06, 0xfd, 0xbd, 0xed, 0xcf, 0xf7, 0x7f, 0x6e, 0x99, 0x3b, 0xfd, - 0xed, 0x5f, 0x74, 0x66, 0xd0, 0x0a, 0x74, 0x14, 0x68, 0x6f, 0xff, 0x89, 0x80, 0x56, 0x6e, 0x7d, - 0x93, 0xb1, 0x11, 0x8c, 0x2e, 0xc2, 0xf2, 0xd6, 0xfe, 0xde, 0x93, 0xfe, 0xee, 0xde, 0x8e, 0x69, - 0x6d, 0x99, 0x3b, 0xfd, 0x27, 0x3b, 0xdb, 0x9d, 0x99, 0x34, 0xd8, 0x3c, 0xdc, 0xdb, 0xdb, 0xdd, - 0xfb, 0xa2, 0x53, 0x61, 0x5c, 0x13, 0xf0, 0xce, 0xcf, 0x77, 0x19, 0x72, 0x35, 0x8d, 0x7c, 0xb8, - 0xf7, 
0x70, 0x6f, 0xff, 0x67, 0x7b, 0x9d, 0xda, 0xe6, 0x6f, 0x17, 0xa1, 0xad, 0x0e, 0x71, 0x1c, - 0xf0, 0xdb, 0xc9, 0x4f, 0x61, 0x41, 0xfd, 0xf6, 0x90, 0x78, 0x8f, 0xf4, 0x3f, 0x1a, 0xbd, 0x6e, - 0xbe, 0x43, 0x06, 0x43, 0x33, 0xe8, 0x80, 0x07, 0x27, 0xda, 0x9b, 0x9c, 0xab, 0x7a, 0xb8, 0x90, - 0x7b, 0xf4, 0xd3, 0x5b, 0x2f, 0xeb, 0x8e, 0x39, 0x0e, 0x58, 0x44, 0xa2, 0xbf, 0xa7, 0x44, 0xeb, - 0xfa, 0xb9, 0x9d, 0x7f, 0xa7, 0xd9, 0x7b, 0xad, 0xb4, 0x3f, 0x66, 0xfa, 0x0b, 0xe8, 0x64, 0x5f, - 0x52, 0xa2, 0xe4, 0x96, 0xb9, 0xe4, 0x95, 0x66, 0xef, 0xf5, 0x29, 0x18, 0x3a, 0xeb, 0xdc, 0x6b, - 0xc4, 0x8d, 0xf2, 0xf7, 0x64, 0x39, 0xd6, 0x65, 0x8f, 0xd4, 0xc4, 0x52, 0xa4, 0xdf, 0xd2, 0x20, - 0xfd, 0x0d, 0x60, 0xc1, 0x9b, 0x2a, 0x6d, 0x29, 0x8a, 0x1f, 0xe1, 0x18, 0x33, 0xe8, 0x2b, 0x58, - 0xca, 0x5c, 0x4c, 0xa1, 0x84, 0xaa, 0xf8, 0x9a, 0xad, 0xb7, 0x51, 0x8e, 0x90, 0xd6, 0x9b, 0x7e, - 0xed, 0x94, 0xd2, 0x5b, 0xc1, 0x5d, 0x56, 0x4a, 0x6f, 0x85, 0xf7, 0x55, 0xdc, 0xbc, 0x52, 0x97, - 0x4b, 0x9a, 0x79, 0x15, 0xdd, 0x64, 0xf5, 0xd6, 0xcb, 0xba, 0xf5, 0xe9, 0x67, 0x2e, 0x96, 0xb4, - 0xe9, 0x17, 0xdf, 0x57, 0xf5, 0x36, 0xca, 0x11, 0xb2, 0xba, 0x4a, 0xaa, 0xdc, 0x19, 0x5d, 0xe5, - 0x2e, 0x55, 0x32, 0xba, 0xca, 0x97, 0xc7, 0xa5, 0xae, 0x32, 0xe5, 0xea, 0xd7, 0x4a, 0xcb, 0x79, - 0x79, 0x5d, 0x15, 0x57, 0x08, 0x8d, 0x19, 0xd4, 0x87, 0xba, 0xaa, 0xc7, 0xa1, 0x64, 0x77, 0x67, - 0x8a, 0x80, 0xbd, 0x4b, 0x05, 0x3d, 0x31, 0x8b, 0xf7, 0x60, 0x96, 0x41, 0xd1, 0x4a, 0x0a, 0x49, - 0x91, 0x5e, 0xcc, 0x40, 0x63, 0xb2, 0x8f, 0x60, 0x5e, 0x94, 0xaf, 0x50, 0x92, 0x57, 0xa4, 0x6a, - 0x65, 0xbd, 0xb5, 0x1c, 0x3c, 0x26, 0xfe, 0x52, 0xfc, 0x0a, 0x25, 0xeb, 0x50, 0xe8, 0x72, 0xea, - 0x91, 0x7e, 0xba, 0xda, 0xd5, 0xbb, 0x52, 0xdc, 0xa9, 0xeb, 0x2b, 0x73, 0x38, 0xaf, 0x97, 0x45, - 0x4f, 0x39, 0x7d, 0x15, 0x47, 0x63, 0xc6, 0x0c, 0xb2, 0x44, 0x49, 0x27, 0xc3, 0xd8, 0x28, 0x56, - 0x74, 0x8a, 0xf9, 0xb5, 0xa9, 0x38, 0xf1, 0x00, 0x47, 0x70, 0xa1, 0x20, 0x39, 0x45, 0x09, 0x75, - 0x79, 0x5e, 0xdc, 0x7b, 0x63, 0x3a, 0x92, 
0xae, 0x22, 0x69, 0x6b, 0xab, 0xfa, 0x06, 0xd5, 0x4c, - 0x6c, 0x2d, 0x07, 0x57, 0xc4, 0x9b, 0x7f, 0x55, 0x83, 0x45, 0x51, 0x42, 0x90, 0x07, 0xcc, 0x17, - 0x00, 0x49, 0x95, 0x0b, 0xf5, 0x52, 0xd3, 0x4c, 0x95, 0xfb, 0x7a, 0x97, 0x0b, 0xfb, 0x74, 0xe5, - 0x6b, 0x05, 0x2b, 0x4d, 0xf9, 0xf9, 0x32, 0x98, 0xa6, 0xfc, 0x82, 0x1a, 0x97, 0x31, 0x83, 0xb6, - 0xa1, 0x11, 0x57, 0x51, 0x90, 0x56, 0x7c, 0xc9, 0x94, 0x80, 0x7a, 0xbd, 0xa2, 0x2e, 0x5d, 0x22, - 0xad, 0x32, 0xa2, 0x49, 0x94, 0xaf, 0xb7, 0x68, 0x12, 0x15, 0x15, 0x53, 0x92, 0xd9, 0x89, 0x44, - 0x30, 0x3b, 0xbb, 0x54, 0x6e, 0x9d, 0x9d, 0x5d, 0x3a, 0x77, 0x34, 0x66, 0x3e, 0xbf, 0xf2, 0x9b, - 0xdf, 0xad, 0x57, 0xfe, 0xeb, 0x77, 0xeb, 0x33, 0x7f, 0xf9, 0x7c, 0xbd, 0xf2, 0x9b, 0xe7, 0xeb, - 0x95, 0x7f, 0x7f, 0xbe, 0x5e, 0xf9, 0xed, 0xf3, 0xf5, 0xca, 0x77, 0xff, 0xb3, 0x3e, 0x73, 0x34, - 0xcf, 0xff, 0x0d, 0x7c, 0xf7, 0xf7, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xdd, 0x86, 0xaa, 0xcf, - 0x39, 0x00, 0x00, + // 4074 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3b, 0x4d, 0x6f, 0x1c, 0x47, + 0x76, 0x9c, 0x19, 0x7e, 0xcc, 0xbc, 0xe1, 0x0c, 0x87, 0x25, 0x8a, 0x1c, 0x8d, 0x24, 0x9a, 0x6e, + 0x59, 0xb6, 0xa4, 0x5d, 0xc9, 0x32, 0xbd, 0xb6, 0x63, 0xf9, 0x4b, 0x63, 0x92, 0x32, 0x68, 0x49, + 0x24, 0xb7, 0x47, 0xf4, 0xee, 0x66, 0x03, 0x74, 0x9a, 0xd3, 0xc5, 0x61, 0xdb, 0x33, 0x5d, 0xbd, + 0xdd, 0xd5, 0xb2, 0x98, 0x53, 0x72, 0xcb, 0xd1, 0x01, 0x72, 0xc9, 0x2d, 0x87, 0x00, 0x8b, 0x5c, + 0x72, 0xc8, 0x29, 0xbf, 0x20, 0x58, 0x20, 0x08, 0x90, 0x43, 0x10, 0x24, 0xb7, 0x5d, 0xe5, 0x90, + 0x43, 0x80, 0xfc, 0x86, 0xa0, 0xbe, 0xba, 0xab, 0xbf, 0x46, 0xa4, 0x6c, 0xec, 0xea, 0xd6, 0xf5, + 0xea, 0xbd, 0x57, 0xaf, 0xea, 0xbd, 0x7a, 0xf5, 0xde, 0xab, 0x6a, 0x68, 0xd8, 0xbe, 0x7b, 0xc7, + 0x0f, 0x08, 0x25, 0x68, 0x21, 0x88, 0x3c, 0xea, 0x4e, 0x70, 0xef, 0xf6, 0xc8, 0xa5, 0x27, 0xd1, + 0xd1, 0x9d, 0x21, 0x99, 0xbc, 0x3d, 0x22, 0x23, 0xf2, 0x36, 0xef, 0x3f, 0x8a, 0x8e, 0x79, 0x8b, + 0x37, 0xf8, 
0x97, 0xa0, 0x33, 0x6e, 0x41, 0xfb, 0x2b, 0x1c, 0x84, 0x2e, 0xf1, 0x4c, 0xfc, 0xab, + 0x08, 0x87, 0x14, 0x75, 0x61, 0xe1, 0xa9, 0x80, 0x74, 0x2b, 0x1b, 0x95, 0x1b, 0x0d, 0x53, 0x35, + 0x8d, 0x5f, 0x57, 0x60, 0x29, 0x46, 0x0e, 0x7d, 0xe2, 0x85, 0xb8, 0x1c, 0x1b, 0xbd, 0x0e, 0x8b, + 0x52, 0x26, 0xcb, 0xb3, 0x27, 0xb8, 0x5b, 0xe5, 0xdd, 0x4d, 0x09, 0xdb, 0xb3, 0x27, 0x18, 0xbd, + 0x05, 0x4b, 0x0a, 0x45, 0x31, 0xa9, 0x71, 0xac, 0xb6, 0x04, 0xcb, 0xd1, 0xd0, 0x1d, 0xb8, 0xa0, + 0x10, 0x6d, 0xdf, 0x8d, 0x91, 0x67, 0x39, 0xf2, 0xb2, 0xec, 0xea, 0xfb, 0xae, 0xc4, 0x37, 0x7e, + 0x09, 0x8d, 0xed, 0xbd, 0xc1, 0x16, 0xf1, 0x8e, 0xdd, 0x11, 0x13, 0x31, 0xc4, 0x01, 0xa3, 0xe9, + 0x56, 0x36, 0x6a, 0x4c, 0x44, 0xd9, 0x44, 0x3d, 0xa8, 0x87, 0xd8, 0x0e, 0x86, 0x27, 0x38, 0xec, + 0x56, 0x79, 0x57, 0xdc, 0x66, 0x54, 0xc4, 0xa7, 0x2e, 0xf1, 0xc2, 0x6e, 0x4d, 0x50, 0xc9, 0xa6, + 0xf1, 0x37, 0x15, 0x68, 0x1e, 0x90, 0x80, 0x3e, 0xb6, 0x7d, 0xdf, 0xf5, 0x46, 0xe8, 0x36, 0xd4, + 0xf9, 0x5a, 0x0e, 0xc9, 0x98, 0xaf, 0x41, 0x7b, 0x73, 0xf9, 0x8e, 0x14, 0xe9, 0xce, 0x81, 0xec, + 0x30, 0x63, 0x14, 0x74, 0x1d, 0xda, 0x43, 0xe2, 0x51, 0xdb, 0xf5, 0x70, 0x60, 0xf9, 0x24, 0xa0, + 0x7c, 0x65, 0xe6, 0xcc, 0x56, 0x0c, 0x65, 0xcc, 0xd1, 0x65, 0x68, 0x9c, 0x90, 0x90, 0x0a, 0x8c, + 0x1a, 0xc7, 0xa8, 0x33, 0x00, 0xef, 0x5c, 0x83, 0x05, 0xde, 0xe9, 0xfa, 0x72, 0x0d, 0xe6, 0x59, + 0x73, 0xd7, 0x37, 0xbe, 0xab, 0xc0, 0xdc, 0x63, 0x12, 0x79, 0x34, 0x33, 0x8c, 0x4d, 0x4f, 0xa4, + 0x7e, 0xb4, 0x61, 0x6c, 0x7a, 0x92, 0x0c, 0xc3, 0x30, 0x84, 0x8a, 0xc4, 0x30, 0xac, 0xb3, 0x07, + 0xf5, 0x00, 0xdb, 0x0e, 0xf1, 0xc6, 0xa7, 0x5c, 0x84, 0xba, 0x19, 0xb7, 0x99, 0xee, 0x42, 0x3c, + 0x76, 0xbd, 0xe8, 0x99, 0x15, 0xe0, 0xb1, 0x7d, 0x84, 0xc7, 0x5c, 0x94, 0xba, 0xd9, 0x96, 0x60, + 0x53, 0x40, 0x8d, 0xaf, 0x61, 0x89, 0x29, 0x3b, 0xf4, 0xed, 0x21, 0xde, 0xe7, 0x4b, 0xc8, 0x4c, + 0x83, 0x0f, 0xea, 0x61, 0xfa, 0x2d, 0x09, 0xbe, 0xe1, 0x92, 0xd5, 0xcd, 0x26, 0x83, 0xed, 0x09, + 0x10, 0xba, 0x04, 0x75, 0x21, 0x97, 0xeb, 0x70, 
0xb1, 0xea, 0x26, 0x9f, 0xf1, 0x81, 0xeb, 0xc4, + 0x5d, 0xae, 0x3f, 0x94, 0x52, 0x2d, 0x88, 0xd9, 0x0f, 0x0d, 0x03, 0x60, 0xd7, 0xa3, 0xef, 0xff, + 0xe4, 0x2b, 0x7b, 0x1c, 0x61, 0xb4, 0x02, 0x73, 0x4f, 0xd9, 0x07, 0xe7, 0x5f, 0x33, 0x45, 0xc3, + 0xf8, 0xf7, 0x2a, 0x5c, 0x7e, 0xc4, 0x04, 0x1c, 0xd8, 0x9e, 0x73, 0x44, 0x9e, 0x0d, 0xf0, 0x30, + 0x0a, 0x5c, 0x7a, 0xba, 0x45, 0x3c, 0x8a, 0x9f, 0x51, 0xb4, 0x03, 0xcb, 0x9e, 0x92, 0xd7, 0x52, + 0x26, 0xc0, 0x38, 0x34, 0x37, 0xbb, 0xb1, 0x5e, 0x33, 0x33, 0x32, 0x3b, 0x5e, 0x1a, 0x10, 0xa2, + 0xcf, 0x92, 0xf5, 0x51, 0x4c, 0xaa, 0x9c, 0xc9, 0x6a, 0xcc, 0x64, 0xb0, 0xc3, 0xe5, 0x90, 0x2c, + 0xd4, 0xba, 0x29, 0x06, 0xef, 0x02, 0xdb, 0x2b, 0x96, 0x1d, 0x5a, 0x51, 0x88, 0x03, 0x3e, 0xd3, + 0xe6, 0xe6, 0x85, 0x98, 0x38, 0x99, 0xa7, 0xd9, 0x08, 0x22, 0xaf, 0x1f, 0x1e, 0x86, 0x38, 0xe0, + 0x3b, 0x4a, 0x6a, 0xc8, 0x0a, 0x08, 0xa1, 0xc7, 0xa1, 0xd2, 0x8a, 0x02, 0x9b, 0x1c, 0x8a, 0xde, + 0x86, 0x0b, 0x61, 0xe4, 0xfb, 0x63, 0x3c, 0xc1, 0x1e, 0xb5, 0xc7, 0xd6, 0x28, 0x20, 0x91, 0x1f, + 0x76, 0xe7, 0x36, 0x6a, 0x37, 0x6a, 0x26, 0xd2, 0xbb, 0xbe, 0xe0, 0x3d, 0x68, 0x1d, 0xc0, 0x0f, + 0xdc, 0xa7, 0xee, 0x18, 0x8f, 0xb0, 0xd3, 0x9d, 0xe7, 0x4c, 0x35, 0x88, 0xf1, 0x5d, 0x15, 0x2e, + 0xf2, 0xe9, 0x1c, 0x10, 0x47, 0xae, 0xac, 0xdc, 0x7f, 0xd7, 0xa0, 0x35, 0xe4, 0xec, 0x2d, 0xdf, + 0x0e, 0xb0, 0x47, 0xa5, 0x21, 0x2e, 0x0a, 0xe0, 0x01, 0x87, 0xa1, 0x7d, 0xe8, 0x84, 0x52, 0x11, + 0xd6, 0x50, 0x68, 0x42, 0xae, 0xd7, 0x1b, 0xf1, 0x94, 0xa7, 0x68, 0xcd, 0x5c, 0x0a, 0x73, 0x6a, + 0x5c, 0x08, 0x4f, 0xc3, 0x21, 0x1d, 0x8b, 0xfd, 0xdb, 0xdc, 0xfc, 0x51, 0x9a, 0x4f, 0x56, 0xcc, + 0x3b, 0x03, 0x81, 0xbd, 0xe3, 0xd1, 0xe0, 0xd4, 0x54, 0xb4, 0xbd, 0x7b, 0xb0, 0xa8, 0x77, 0xa0, + 0x0e, 0xd4, 0xbe, 0xc1, 0xa7, 0x72, 0x0a, 0xec, 0x33, 0xb1, 0x32, 0xb1, 0x7b, 0x44, 0xe3, 0x5e, + 0xf5, 0x8f, 0x2a, 0x46, 0x00, 0x28, 0x19, 0xe5, 0x31, 0xa6, 0xb6, 0x63, 0x53, 0x1b, 0x21, 0x98, + 0xe5, 0xfe, 0x50, 0xb0, 0xe0, 0xdf, 0x8c, 0x6b, 0x24, 0x0d, 0xbd, 0x61, 0xb2, 0x4f, 
0x74, 0x05, + 0x1a, 0xb1, 0x49, 0x49, 0xa7, 0x98, 0x00, 0x98, 0x73, 0xb2, 0x29, 0xc5, 0x13, 0x9f, 0x72, 0xf5, + 0xb6, 0x4c, 0xd5, 0x34, 0xfe, 0x69, 0x16, 0x3a, 0x39, 0x0d, 0x7c, 0x00, 0xf5, 0x89, 0x1c, 0x5e, + 0x5a, 0xf2, 0xe5, 0xc4, 0x43, 0xe5, 0x24, 0x34, 0x63, 0x64, 0xe6, 0x00, 0xd8, 0xd6, 0xd2, 0xfc, + 0x77, 0xdc, 0x66, 0x6a, 0x1d, 0x93, 0x91, 0xe5, 0xb8, 0x01, 0x1e, 0x52, 0x12, 0x9c, 0x4a, 0x29, + 0x17, 0xc7, 0x64, 0xb4, 0xad, 0x60, 0xe8, 0x1d, 0x00, 0xc7, 0x0b, 0x99, 0x46, 0x8f, 0xdd, 0x11, + 0x97, 0xb5, 0xb9, 0x89, 0xe2, 0xb1, 0x63, 0x1f, 0x6d, 0x36, 0x1c, 0x2f, 0x94, 0xc2, 0x7e, 0x08, + 0x2d, 0xe6, 0xf3, 0xac, 0x89, 0x70, 0xaf, 0xc2, 0x26, 0x9b, 0x9b, 0x2b, 0x9a, 0xc4, 0xb1, 0xef, + 0x35, 0x17, 0xfd, 0xa4, 0x11, 0xa2, 0x4f, 0x60, 0x9e, 0xfb, 0x9c, 0xb0, 0x3b, 0xcf, 0x69, 0xae, + 0x17, 0xcc, 0x52, 0x6a, 0xfb, 0x11, 0xc7, 0x13, 0xca, 0x96, 0x44, 0xe8, 0x11, 0x34, 0x6d, 0xcf, + 0x23, 0xd4, 0x16, 0xdb, 0x75, 0x81, 0xf3, 0xb8, 0x55, 0xce, 0xa3, 0x9f, 0x20, 0x0b, 0x46, 0x3a, + 0x39, 0xfa, 0x09, 0xcc, 0xf1, 0xfd, 0xdc, 0xad, 0xf3, 0x59, 0xaf, 0x4f, 0x37, 0x3f, 0x53, 0x20, + 0xf7, 0x3e, 0x84, 0xa6, 0x26, 0xda, 0x79, 0xcc, 0xad, 0xf7, 0x29, 0x74, 0xb2, 0x12, 0x9d, 0xcb, + 0x5c, 0x77, 0x61, 0xc5, 0x8c, 0xbc, 0x44, 0x30, 0x15, 0x10, 0xbc, 0x03, 0xf3, 0x52, 0x7f, 0xc2, + 0x76, 0x2e, 0x95, 0xae, 0x88, 0x29, 0x11, 0x8d, 0x4f, 0xe0, 0x62, 0x86, 0x95, 0x0c, 0x17, 0xde, + 0x80, 0xb6, 0x4f, 0x1c, 0x2b, 0x14, 0x60, 0xcb, 0x75, 0x94, 0x33, 0xf0, 0x63, 0xdc, 0x5d, 0x87, + 0x91, 0x0f, 0x28, 0xf1, 0xf3, 0xa2, 0x9c, 0x8d, 0xbc, 0x0b, 0xab, 0x59, 0x72, 0x31, 0xbc, 0xf1, + 0x19, 0xac, 0x99, 0x78, 0x42, 0x9e, 0xe2, 0x97, 0x65, 0xdd, 0x83, 0x6e, 0x9e, 0x41, 0xc2, 0x3c, + 0x81, 0x0e, 0xa8, 0x4d, 0xa3, 0xf0, 0x7c, 0xcc, 0x6f, 0xea, 0x0c, 0xe4, 0x41, 0x28, 0xf8, 0xa0, + 0x36, 0x54, 0x5d, 0x5f, 0x12, 0x55, 0x5d, 0xdf, 0xf8, 0x0c, 0x1a, 0xf1, 0x11, 0x84, 0x36, 0x93, + 0x50, 0xa5, 0xfa, 0x82, 0x73, 0x2a, 0x0e, 0x62, 0x1e, 0xe6, 0xbc, 0xb5, 0x1c, 0x69, 0x13, 0x20, + 0xf6, 0x33, 0xea, 0xdc, 
0x43, 0x79, 0x7e, 0xa6, 0x86, 0x65, 0xfc, 0x5d, 0xca, 0xe9, 0x68, 0x22, + 0x3b, 0xb1, 0xc8, 0x4e, 0xca, 0x09, 0x55, 0xcf, 0xe3, 0x84, 0xee, 0xc0, 0x5c, 0x48, 0x6d, 0x2a, + 0xdc, 0x60, 0x5b, 0x9b, 0x5c, 0x7a, 0x48, 0x6c, 0x0a, 0x34, 0x74, 0x15, 0x60, 0x18, 0x60, 0x9b, + 0x62, 0xc7, 0xb2, 0x85, 0x7f, 0xac, 0x99, 0x0d, 0x09, 0xe9, 0x53, 0x74, 0x0f, 0x16, 0x54, 0xdc, + 0x31, 0xc7, 0xc5, 0xd8, 0x28, 0x60, 0x98, 0x5a, 0x7d, 0x53, 0x11, 0x24, 0x7b, 0x7a, 0x7e, 0xfa, + 0x9e, 0x96, 0x74, 0x02, 0x59, 0x73, 0x4b, 0x0b, 0xa5, 0x6e, 0x49, 0x50, 0x9c, 0xc5, 0x2d, 0xd5, + 0x4b, 0xdd, 0x92, 0xe4, 0x31, 0xd5, 0x2d, 0xfd, 0x21, 0x1d, 0xcc, 0x63, 0xe8, 0xe6, 0x37, 0x88, + 0x74, 0x0c, 0xef, 0xc0, 0x7c, 0xc8, 0x21, 0x53, 0x9c, 0x8c, 0x24, 0x91, 0x88, 0xc6, 0x03, 0x58, + 0xc9, 0x58, 0x80, 0x08, 0xfb, 0x62, 0x7b, 0xa9, 0x9c, 0xc9, 0x5e, 0x8c, 0xff, 0xab, 0xe8, 0xd6, + 0xfb, 0xc0, 0x1d, 0x53, 0x1c, 0xe4, 0xac, 0xf7, 0x5d, 0xc5, 0x54, 0x98, 0xee, 0xd5, 0x32, 0xa6, + 0x22, 0x22, 0x93, 0x96, 0x38, 0x80, 0x36, 0xd7, 0xa1, 0x15, 0xe2, 0x31, 0x3f, 0x10, 0x65, 0x28, + 0xf2, 0xe3, 0x02, 0x6a, 0x31, 0xae, 0x30, 0x80, 0x81, 0x44, 0x17, 0xea, 0x6b, 0x8d, 0x75, 0x58, + 0xef, 0x3e, 0xa0, 0x3c, 0xd2, 0xb9, 0xf4, 0xf0, 0x25, 0xdb, 0xfb, 0x2c, 0x93, 0x28, 0xf0, 0xf4, + 0xc7, 0x5c, 0x8c, 0x29, 0x4a, 0x10, 0x72, 0x9a, 0x12, 0xd1, 0xf8, 0xdb, 0x1a, 0x40, 0xd2, 0xf9, + 0xca, 0x6e, 0xfa, 0x0f, 0xe2, 0x2d, 0x28, 0xa2, 0x89, 0xd7, 0x0a, 0xf8, 0x15, 0x6e, 0xbe, 0x07, + 0xe9, 0xcd, 0x27, 0xe2, 0x8a, 0x37, 0x8a, 0xa8, 0x5f, 0xd9, 0x6d, 0xb7, 0x05, 0xab, 0x59, 0x75, + 0xcb, 0x4d, 0x77, 0x13, 0xe6, 0x5c, 0x8a, 0x27, 0x22, 0x2f, 0xd6, 0x93, 0x0b, 0x0d, 0x57, 0x60, + 0x18, 0xaf, 0x43, 0x63, 0x77, 0x62, 0x8f, 0xf0, 0xc0, 0xc7, 0x43, 0x36, 0x96, 0xcb, 0x1a, 0x72, + 0x7c, 0xd1, 0x30, 0x36, 0xa1, 0xfe, 0x10, 0x9f, 0x8a, 0x3d, 0x78, 0x46, 0xf9, 0x8c, 0x7f, 0xa9, + 0xc0, 0x1a, 0xf7, 0x9d, 0x5b, 0x2a, 0x2b, 0x35, 0x71, 0x48, 0xa2, 0x60, 0x88, 0x43, 0xae, 0x52, + 0x3f, 0xb2, 0x7c, 0x1c, 0xb8, 0xc4, 0x91, 0x39, 0x5c, 0x63, 
0xe8, 0x47, 0x07, 0x1c, 0xc0, 0x32, + 0x57, 0xd6, 0xfd, 0xab, 0x88, 0x48, 0xdb, 0xaa, 0x99, 0xf5, 0xa1, 0x1f, 0xfd, 0x94, 0xb5, 0x15, + 0x6d, 0x78, 0x62, 0x07, 0x38, 0xe4, 0x36, 0x24, 0x68, 0x07, 0x1c, 0x80, 0xde, 0x81, 0x8b, 0x13, + 0x3c, 0x21, 0xc1, 0xa9, 0x35, 0x76, 0x27, 0x2e, 0xb5, 0x5c, 0xcf, 0x3a, 0x3a, 0xa5, 0x38, 0x94, + 0x86, 0x83, 0x44, 0xe7, 0x23, 0xd6, 0xb7, 0xeb, 0x7d, 0xce, 0x7a, 0x90, 0x01, 0x2d, 0x42, 0x26, + 0x56, 0x38, 0x24, 0x01, 0xb6, 0x6c, 0xe7, 0x6b, 0x7e, 0x78, 0xd4, 0xcc, 0x26, 0x21, 0x93, 0x01, + 0x83, 0xf5, 0x9d, 0xaf, 0x0d, 0x1b, 0x5a, 0xa9, 0x9c, 0x8e, 0xc5, 0xfa, 0x3c, 0x79, 0x93, 0xb1, + 0x3e, 0xfb, 0x66, 0xb0, 0x80, 0x8c, 0xd5, 0x3a, 0xf0, 0x6f, 0x06, 0xa3, 0xa7, 0xbe, 0x0a, 0xf4, + 0xf9, 0x37, 0x5b, 0xb0, 0x31, 0x7e, 0x2a, 0xd3, 0xea, 0x86, 0x29, 0x1a, 0x86, 0x03, 0xb0, 0x65, + 0xfb, 0xf6, 0x91, 0x3b, 0x76, 0xe9, 0x29, 0xba, 0x09, 0x1d, 0xdb, 0x71, 0xac, 0xa1, 0x82, 0xb8, + 0x58, 0xd5, 0x38, 0x96, 0x6c, 0xc7, 0xd9, 0xd2, 0xc0, 0xe8, 0x47, 0xb0, 0xec, 0x04, 0xc4, 0x4f, + 0xe3, 0x8a, 0xa2, 0x47, 0x87, 0x75, 0xe8, 0xc8, 0xc6, 0x5f, 0xcf, 0xc2, 0xd5, 0xb4, 0x5a, 0xb2, + 0x59, 0xf2, 0x07, 0xb0, 0x98, 0x19, 0x35, 0x9d, 0x9e, 0x26, 0x42, 0x9a, 0x29, 0xc4, 0x4c, 0x1e, + 0x59, 0xcd, 0xe6, 0x91, 0xc5, 0xe9, 0x77, 0xed, 0x87, 0x48, 0xbf, 0x67, 0xbf, 0x4f, 0xfa, 0x3d, + 0x77, 0xa6, 0xf4, 0xfb, 0x4d, 0x5e, 0xd0, 0x52, 0x44, 0x3c, 0x6d, 0x9a, 0x17, 0x55, 0x97, 0x18, + 0xc7, 0x53, 0x85, 0xaf, 0x4c, 0x9a, 0xbe, 0x70, 0x9e, 0x34, 0xbd, 0x5e, 0x9a, 0xa6, 0x33, 0x8b, + 0xf0, 0x7d, 0x3b, 0x98, 0x90, 0xc0, 0xf2, 0x03, 0x72, 0xec, 0x8e, 0x71, 0xb7, 0xc1, 0x45, 0x58, + 0x52, 0xf0, 0x03, 0x01, 0x46, 0x77, 0x61, 0x25, 0xc4, 0xc3, 0x21, 0x99, 0xf8, 0x0a, 0x53, 0x54, + 0x81, 0x80, 0xa3, 0x23, 0xd9, 0x27, 0xb1, 0x0f, 0x6c, 0x7a, 0x62, 0xfc, 0x7d, 0x05, 0x56, 0xd2, + 0x66, 0x21, 0x73, 0xb6, 0x4f, 0xa1, 0x11, 0xa8, 0x7d, 0x2b, 0x4d, 0x61, 0x23, 0x1d, 0x1b, 0xe5, + 0xf7, 0xb7, 0x99, 0x90, 0xa0, 0x9f, 0x96, 0x66, 0xff, 0x6f, 0x96, 0xb0, 0x79, 0x51, 0xfe, 0x6f, + 
0xf4, 0x61, 0x39, 0x46, 0x9e, 0x9a, 0x7b, 0x6b, 0xb9, 0x74, 0x35, 0x9d, 0x4b, 0x7b, 0x30, 0xbf, + 0x8d, 0x9f, 0xba, 0x43, 0xfc, 0x83, 0x14, 0xd3, 0x36, 0xa0, 0xe9, 0xe3, 0x60, 0xe2, 0x86, 0x61, + 0x6c, 0xd2, 0x0d, 0x53, 0x07, 0x19, 0xff, 0x35, 0x07, 0x4b, 0xd9, 0x95, 0x7d, 0x3f, 0x97, 0xba, + 0xf7, 0x92, 0x3d, 0x96, 0x9d, 0x9f, 0x76, 0x7e, 0xde, 0x50, 0x2e, 0xba, 0x9a, 0x89, 0xe0, 0x63, + 0x2f, 0x2e, 0xdd, 0x36, 0x9b, 0xff, 0x90, 0x4c, 0x26, 0xb6, 0xe7, 0xa8, 0x42, 0xa7, 0x6c, 0xb2, + 0xd5, 0xb2, 0x83, 0x11, 0xdb, 0x38, 0x0c, 0xcc, 0xbf, 0xd1, 0x6b, 0xd0, 0x64, 0x91, 0xb0, 0xeb, + 0xf1, 0xcc, 0x9f, 0x6f, 0x8b, 0x86, 0x09, 0x12, 0xb4, 0xed, 0x06, 0xe8, 0x3a, 0xcc, 0x62, 0xef, + 0xa9, 0x3a, 0x29, 0x93, 0x4a, 0xa8, 0x3a, 0x1a, 0x4c, 0xde, 0x8d, 0xde, 0x84, 0xf9, 0x09, 0x89, + 0x3c, 0xaa, 0x62, 0xe2, 0x76, 0x8c, 0xc8, 0xcb, 0x97, 0xa6, 0xec, 0x45, 0x37, 0x61, 0xc1, 0xe1, + 0x3a, 0x50, 0x81, 0xef, 0x52, 0x52, 0x3d, 0xe0, 0x70, 0x53, 0xf5, 0xa3, 0x8f, 0xe3, 0x33, 0xbe, + 0x91, 0x39, 0xa5, 0x33, 0x8b, 0x5a, 0x78, 0xd0, 0x3f, 0x4c, 0x1f, 0xf4, 0xc0, 0x59, 0xdc, 0x2c, + 0x65, 0x31, 0x3d, 0xf7, 0xbf, 0x04, 0xf5, 0x31, 0x19, 0x09, 0x3b, 0x68, 0x8a, 0xb2, 0xf8, 0x98, + 0x8c, 0xb8, 0x19, 0xac, 0xb0, 0xc0, 0xc6, 0x71, 0xbd, 0xee, 0x22, 0xdf, 0xf0, 0xa2, 0xc1, 0xce, + 0x2b, 0xfe, 0x61, 0x11, 0x6f, 0x88, 0xbb, 0x2d, 0xde, 0xd5, 0xe0, 0x90, 0x7d, 0x6f, 0xc8, 0x8f, + 0x53, 0x4a, 0x4f, 0xbb, 0x6d, 0x0e, 0x67, 0x9f, 0x2c, 0x1e, 0x15, 0x99, 0xc8, 0x52, 0x26, 0x1e, + 0x2d, 0xda, 0x9f, 0xaf, 0x40, 0x71, 0xe1, 0x1f, 0x2b, 0xb0, 0xba, 0xc5, 0xc3, 0x31, 0xcd, 0x13, + 0x9c, 0x23, 0x39, 0x46, 0x77, 0xe3, 0x2a, 0x44, 0x36, 0xc7, 0xcd, 0x4e, 0x56, 0xe2, 0xa1, 0xfb, + 0xd0, 0x56, 0x3c, 0x25, 0x65, 0xed, 0x45, 0xf5, 0x8b, 0x56, 0xa8, 0x37, 0x8d, 0x8f, 0x61, 0x2d, + 0x27, 0xb3, 0x0c, 0x9d, 0x5e, 0x87, 0xc5, 0xc4, 0x23, 0xc4, 0x22, 0x37, 0x63, 0xd8, 0xae, 0x63, + 0xdc, 0x83, 0x8b, 0x03, 0x6a, 0x07, 0x34, 0x37, 0xe1, 0x33, 0xd0, 0xf2, 0x12, 0x46, 0x9a, 0x56, + 0x56, 0x19, 0x06, 0xb0, 0x32, 0xa0, 
0xc4, 0x7f, 0x09, 0xa6, 0x6c, 0xa7, 0xb3, 0x69, 0x93, 0x88, + 0xca, 0x78, 0x49, 0x35, 0x8d, 0x35, 0x51, 0x70, 0xc9, 0x8f, 0xf6, 0x11, 0xac, 0x8a, 0x7a, 0xc7, + 0xcb, 0x4c, 0xe2, 0x92, 0xaa, 0xb6, 0xe4, 0xf9, 0x6e, 0xc3, 0x85, 0xc4, 0x95, 0x27, 0xa9, 0xdb, + 0xed, 0x74, 0xea, 0xb6, 0x96, 0xd7, 0x71, 0x2a, 0x73, 0xfb, 0xab, 0xaa, 0xe6, 0x30, 0x4b, 0x12, + 0xb7, 0xcd, 0x74, 0xe2, 0x76, 0xa5, 0x84, 0x65, 0x2a, 0x6f, 0xcb, 0x5b, 0x64, 0xad, 0xc0, 0x22, + 0xcd, 0x5c, 0x76, 0x37, 0x9b, 0x29, 0x34, 0x67, 0x64, 0xfb, 0xbd, 0x24, 0x77, 0xbb, 0x22, 0xb9, + 0x8b, 0x87, 0x8e, 0x6b, 0x50, 0x77, 0x33, 0xc9, 0x5d, 0xb7, 0x4c, 0xcc, 0x38, 0xb7, 0xfb, 0xcb, + 0x59, 0x68, 0xc4, 0x7d, 0xb9, 0x85, 0xcd, 0x2f, 0x52, 0xb5, 0x60, 0x91, 0xf4, 0xf3, 0xab, 0xf6, + 0x32, 0xe7, 0xd7, 0xec, 0x8b, 0xce, 0xaf, 0xcb, 0xd0, 0xe0, 0x1f, 0x56, 0x80, 0x8f, 0xe5, 0x79, + 0x54, 0xe7, 0x00, 0x13, 0x1f, 0x27, 0x06, 0x35, 0x7f, 0x16, 0x83, 0xca, 0x64, 0x91, 0x0b, 0xd9, + 0x2c, 0xf2, 0xfd, 0xf8, 0x84, 0x11, 0x67, 0xd1, 0x7a, 0x9e, 0x5d, 0xe1, 0xd9, 0xb2, 0x93, 0x3e, + 0x5b, 0xc4, 0xf1, 0x74, 0xad, 0x80, 0xf8, 0x95, 0xcd, 0x21, 0x1f, 0x89, 0x1c, 0x52, 0xb7, 0x2a, + 0xe9, 0x08, 0x37, 0x01, 0xe2, 0x3d, 0xaf, 0x12, 0x49, 0x94, 0x9f, 0x9a, 0xa9, 0x61, 0x31, 0xaf, + 0x92, 0x5a, 0xff, 0xa4, 0x50, 0x7a, 0x06, 0xaf, 0xf2, 0xcf, 0x7a, 0x94, 0x54, 0x52, 0x6b, 0x7c, + 0x3f, 0x57, 0x76, 0x38, 0x9b, 0xd5, 0xdd, 0x4e, 0x57, 0x1d, 0xce, 0x67, 0x2e, 0xb9, 0xa2, 0x03, + 0x3f, 0xd4, 0xed, 0x40, 0x76, 0x8b, 0x7c, 0xb1, 0x21, 0x21, 0x7d, 0xca, 0x42, 0xa9, 0x63, 0xd7, + 0x73, 0xc3, 0x13, 0xd1, 0x3f, 0xcf, 0xfb, 0x41, 0x81, 0xfa, 0xfc, 0x0a, 0x18, 0x3f, 0x73, 0xa9, + 0x35, 0x24, 0x0e, 0xe6, 0xc6, 0x38, 0x67, 0xd6, 0x19, 0x60, 0x8b, 0x38, 0x38, 0xd9, 0x20, 0xf5, + 0x73, 0x6d, 0x90, 0x46, 0x66, 0x83, 0xac, 0xc2, 0x7c, 0x80, 0xed, 0x90, 0x78, 0x32, 0xec, 0x97, + 0x2d, 0x76, 0x56, 0x4c, 0x70, 0x18, 0xb2, 0x01, 0x64, 0x00, 0x23, 0x9b, 0x5a, 0x98, 0xb5, 0x58, + 0x16, 0x66, 0x4d, 0x29, 0x66, 0x66, 0xc2, 0xac, 0x56, 0x59, 0x98, 0x75, 
0x96, 0x5a, 0xa6, 0x16, + 0x44, 0xb6, 0xa7, 0x06, 0x91, 0x7a, 0x38, 0xb6, 0x94, 0x0a, 0xc7, 0xfe, 0x90, 0x7b, 0xea, 0x21, + 0xac, 0xe5, 0x76, 0x81, 0xdc, 0x54, 0x77, 0x33, 0xd5, 0xd0, 0x6e, 0xd9, 0x02, 0xc5, 0xc5, 0xd0, + 0x3f, 0x85, 0xa5, 0x9d, 0x67, 0x78, 0x38, 0x38, 0xf5, 0x86, 0xe7, 0x88, 0x08, 0x3a, 0x50, 0x1b, + 0x4e, 0x1c, 0x59, 0x06, 0x60, 0x9f, 0x7a, 0x8c, 0x50, 0x4b, 0xc7, 0x08, 0x16, 0x74, 0x92, 0x11, + 0xa4, 0x9c, 0xab, 0x4c, 0x4e, 0x87, 0x21, 0x33, 0xe6, 0x8b, 0xa6, 0x6c, 0x49, 0x38, 0x0e, 0x02, + 0x3e, 0x6b, 0x01, 0xc7, 0x41, 0x90, 0xb6, 0xe8, 0x5a, 0xda, 0xa2, 0x8d, 0xaf, 0xa1, 0xc9, 0x06, + 0xf8, 0x5e, 0xe2, 0xcb, 0x40, 0xb9, 0x96, 0x04, 0xca, 0x71, 0xbc, 0x3d, 0xab, 0xc5, 0xdb, 0xc6, + 0x06, 0x2c, 0x8a, 0xb1, 0xe4, 0x44, 0x3a, 0x50, 0x8b, 0x82, 0xb1, 0xd2, 0x5b, 0x14, 0x8c, 0x8d, + 0x3f, 0x86, 0x56, 0x9f, 0x52, 0x7b, 0x78, 0x72, 0x0e, 0x79, 0xe2, 0xb1, 0xaa, 0x7a, 0x6c, 0x9f, + 0x93, 0xc9, 0x30, 0xa0, 0xad, 0x78, 0x97, 0x8e, 0xbf, 0x07, 0xe8, 0x80, 0x04, 0xf4, 0x01, 0x09, + 0xbe, 0xb5, 0x03, 0xe7, 0x7c, 0xb1, 0x32, 0x82, 0x59, 0xf9, 0xb0, 0xa4, 0x76, 0x63, 0xce, 0xe4, + 0xdf, 0xc6, 0x5b, 0x70, 0x21, 0xc5, 0xaf, 0x74, 0xe0, 0x0f, 0xa0, 0xc9, 0x5d, 0x88, 0x8c, 0xa7, + 0x6e, 0xe8, 0xb5, 0xbe, 0x69, 0x7e, 0x86, 0x65, 0xdc, 0xec, 0x8c, 0xe0, 0xf0, 0xd8, 0xa1, 0xff, + 0x38, 0x13, 0x75, 0xac, 0xa4, 0xe9, 0x33, 0x11, 0xc7, 0x3f, 0x54, 0x60, 0x8e, 0xc3, 0x73, 0x1e, + 0xfd, 0x32, 0x34, 0x02, 0xec, 0x13, 0x8b, 0xda, 0xa3, 0xf8, 0xad, 0x0e, 0x03, 0x3c, 0xb1, 0x47, + 0x21, 0x7f, 0x6a, 0xc4, 0x3a, 0x1d, 0x77, 0x84, 0x43, 0xaa, 0x1e, 0xec, 0x34, 0x19, 0x6c, 0x5b, + 0x80, 0xd8, 0x92, 0x84, 0xee, 0x9f, 0x89, 0x70, 0x62, 0xd6, 0xe4, 0xdf, 0xe8, 0xba, 0xb8, 0x75, + 0x9f, 0x52, 0xda, 0xe1, 0x57, 0xf1, 0x3d, 0xa8, 0x67, 0xaa, 0x39, 0x71, 0xdb, 0xf8, 0x18, 0x90, + 0x3e, 0x67, 0xb9, 0xa8, 0x6f, 0xc2, 0x3c, 0x5f, 0x12, 0x75, 0x1e, 0xb6, 0xd3, 0x93, 0x36, 0x65, + 0xaf, 0xf1, 0x29, 0x20, 0xb1, 0x8a, 0xa9, 0x33, 0xf0, 0xec, 0x2b, 0xfe, 0x11, 0x5c, 0x48, 0xd1, + 0xc7, 0x97, 
0xac, 0x29, 0x06, 0xd9, 0xd1, 0x25, 0xf1, 0xbf, 0x56, 0x00, 0xfa, 0x11, 0x3d, 0x91, + 0x85, 0x06, 0x7d, 0x96, 0x95, 0xf4, 0x2c, 0x59, 0x9f, 0x6f, 0x87, 0xe1, 0xb7, 0x24, 0x50, 0x41, + 0x5e, 0xdc, 0xe6, 0x45, 0x82, 0x88, 0x9e, 0xa8, 0xd2, 0x25, 0xfb, 0x46, 0xd7, 0xa1, 0x2d, 0x9e, + 0x58, 0x59, 0xb6, 0xe3, 0x04, 0x38, 0x0c, 0x65, 0x0d, 0xb3, 0x25, 0xa0, 0x7d, 0x01, 0x64, 0x68, + 0xae, 0x83, 0x3d, 0xea, 0xd2, 0x53, 0x8b, 0x92, 0x6f, 0xb0, 0x27, 0xc3, 0xb7, 0x96, 0x82, 0x3e, + 0x61, 0x40, 0x86, 0x16, 0xe0, 0x91, 0x1b, 0xd2, 0x40, 0xa1, 0xa9, 0x9a, 0x9a, 0x84, 0x72, 0x34, + 0xe3, 0xd7, 0x15, 0xe8, 0x1c, 0x44, 0xe3, 0xb1, 0x98, 0xe4, 0x79, 0xd7, 0x12, 0xbd, 0x25, 0xe7, + 0x51, 0xcd, 0x58, 0x43, 0xb2, 0x44, 0x72, 0x72, 0xdf, 0x3f, 0xad, 0xbc, 0x0b, 0xcb, 0x9a, 0xa0, + 0x52, 0x69, 0xa9, 0x53, 0xba, 0x92, 0x3e, 0xa5, 0x99, 0xa1, 0x88, 0x4c, 0xea, 0xe5, 0x26, 0x67, + 0x5c, 0x84, 0x0b, 0x29, 0x7a, 0x99, 0x85, 0xdd, 0x82, 0x96, 0xbc, 0xe8, 0x94, 0x46, 0x70, 0x09, + 0xea, 0xcc, 0xbd, 0x0c, 0x5d, 0x47, 0xd5, 0xac, 0x17, 0x7c, 0xe2, 0x6c, 0xb9, 0x4e, 0x60, 0xec, + 0x41, 0xcb, 0x14, 0xec, 0x25, 0xee, 0x27, 0xd0, 0x96, 0xd7, 0xa2, 0x56, 0xea, 0x79, 0x40, 0x52, + 0x60, 0x4d, 0xf1, 0x36, 0x5b, 0x9e, 0xde, 0x34, 0x7e, 0x09, 0xbd, 0x43, 0xdf, 0x61, 0xc1, 0x94, + 0xce, 0x55, 0x4d, 0xed, 0x13, 0x50, 0x4f, 0x00, 0xcb, 0x98, 0xa7, 0xc9, 0x5a, 0x81, 0xde, 0x34, + 0xae, 0xc2, 0xe5, 0x42, 0xe6, 0x72, 0xde, 0x3e, 0x74, 0x92, 0x0e, 0xc7, 0x55, 0xa5, 0x7a, 0x5e, + 0x82, 0xaf, 0x68, 0x25, 0xf8, 0xd5, 0xf8, 0x18, 0x16, 0x0e, 0x5d, 0xb6, 0xb4, 0xa0, 0xa9, 0x56, + 0x16, 0x34, 0xcd, 0xa6, 0x82, 0x26, 0xe3, 0xcb, 0x78, 0xf5, 0x64, 0xc4, 0xfa, 0x21, 0x0f, 0x9b, + 0xc5, 0xd8, 0xca, 0x4d, 0x5c, 0x2a, 0x98, 0x9c, 0xc0, 0x30, 0x35, 0x64, 0x63, 0x09, 0x5a, 0x29, + 0x87, 0x61, 0xdc, 0x87, 0x76, 0xc6, 0x03, 0xdc, 0xc9, 0xc4, 0x0f, 0xb9, 0x65, 0xcb, 0x44, 0x0f, + 0x2b, 0xd2, 0x11, 0x3d, 0x08, 0x77, 0xbd, 0x63, 0xa2, 0xf8, 0x5e, 0x83, 0xe6, 0x61, 0xd9, 0x73, + 0xba, 0x59, 0x75, 0x83, 0xf3, 0x16, 0x2c, 0x0f, 
0x28, 0x09, 0xec, 0x11, 0xde, 0xe5, 0xbb, 0xf6, + 0xd8, 0x15, 0x77, 0x1c, 0x51, 0x14, 0xfb, 0x6f, 0xfe, 0x6d, 0xfc, 0x47, 0x05, 0x96, 0x1e, 0xb8, + 0x63, 0x1c, 0x9e, 0x86, 0x14, 0x4f, 0x0e, 0x79, 0x2c, 0x79, 0x05, 0x1a, 0x4c, 0x9a, 0x90, 0xda, + 0x13, 0x5f, 0xdd, 0xf0, 0xc4, 0x00, 0xb6, 0x46, 0xa1, 0x60, 0xad, 0xb2, 0x4b, 0x3d, 0x8e, 0xcf, + 0x8d, 0xca, 0x62, 0x6b, 0x09, 0x42, 0xef, 0x02, 0x44, 0x21, 0x76, 0xe4, 0xad, 0x4e, 0x2d, 0x73, + 0xf4, 0x1c, 0xea, 0xd5, 0x7b, 0x86, 0x27, 0xae, 0x78, 0xde, 0x83, 0xa6, 0xeb, 0x11, 0x07, 0xf3, + 0xea, 0xbd, 0x23, 0x33, 0xcf, 0x62, 0x2a, 0x10, 0x88, 0x87, 0x21, 0x76, 0x8c, 0x3f, 0x91, 0x5e, + 0x58, 0x2d, 0x9e, 0xd4, 0xc1, 0x0e, 0x2c, 0x8b, 0x0d, 0x7d, 0x1c, 0x4f, 0x5a, 0x29, 0x3a, 0x09, + 0xe7, 0x32, 0x0b, 0x62, 0x76, 0x5c, 0x79, 0x2a, 0x2a, 0x0a, 0xe3, 0x1e, 0x5c, 0x4c, 0xc5, 0x7c, + 0xe7, 0x49, 0x95, 0xbe, 0xc8, 0xe4, 0x59, 0x89, 0x81, 0xc8, 0x44, 0x47, 0xd9, 0x47, 0x49, 0xa2, + 0x13, 0x8a, 0x44, 0x27, 0x34, 0x4c, 0xb8, 0x94, 0x4a, 0xff, 0x52, 0x82, 0xbc, 0x97, 0x39, 0xe2, + 0xaf, 0x96, 0x30, 0xcb, 0x9c, 0xf5, 0xff, 0x53, 0x81, 0x95, 0x22, 0x84, 0x97, 0x2c, 0x34, 0xfc, + 0xac, 0xe4, 0xae, 0xfd, 0xee, 0x54, 0x69, 0x7e, 0x2f, 0x25, 0x99, 0x87, 0xd0, 0x2b, 0x5a, 0xbd, + 0xbc, 0x2a, 0x6a, 0x67, 0x50, 0xc5, 0xff, 0x56, 0xb5, 0xd2, 0x59, 0x9f, 0xd2, 0xc0, 0x3d, 0x8a, + 0x98, 0xf1, 0xfe, 0x50, 0x29, 0xf0, 0xfd, 0x38, 0xbd, 0x13, 0xeb, 0x77, 0x23, 0x4f, 0x95, 0x8c, + 0x5a, 0x98, 0xe2, 0xed, 0xa7, 0x53, 0x3c, 0x51, 0x14, 0xbb, 0x3d, 0x95, 0xcd, 0x2b, 0x5b, 0xf7, + 0x78, 0x5e, 0x81, 0x76, 0x5a, 0x0f, 0xe8, 0x63, 0x00, 0x3b, 0x96, 0x5c, 0x9a, 0xfc, 0x95, 0x69, + 0xb3, 0x33, 0x35, 0x7c, 0x74, 0x0d, 0x6a, 0x43, 0x3f, 0x92, 0x1a, 0x49, 0x6e, 0x47, 0xb6, 0xfc, + 0x48, 0x38, 0x00, 0xd6, 0xcb, 0x82, 0x66, 0x71, 0x03, 0x9d, 0xf3, 0x5c, 0x8f, 0x39, 0x58, 0xa0, + 0x4a, 0x1c, 0xf4, 0x19, 0xb4, 0xbf, 0x0d, 0x5c, 0x6a, 0x1f, 0x8d, 0xb1, 0x35, 0xb6, 0x4f, 0x71, + 0x20, 0x3d, 0x57, 0xb9, 0x97, 0x69, 0x29, 0xfc, 0x47, 0x0c, 0xdd, 0x88, 0xa0, 0xae, 
0xc6, 0x7f, + 0x81, 0x47, 0x7e, 0x08, 0x6b, 0x11, 0x43, 0xb3, 0xf8, 0x2d, 0xb8, 0x67, 0x7b, 0xc4, 0x0a, 0x31, + 0x3b, 0x9a, 0xd4, 0xcb, 0xb3, 0x62, 0x6f, 0xb9, 0xc2, 0x89, 0xb6, 0x48, 0x80, 0xf7, 0x6c, 0x8f, + 0x0c, 0x04, 0x85, 0x31, 0x81, 0xa6, 0x36, 0x9d, 0x17, 0x8c, 0x7c, 0x1f, 0x96, 0xd5, 0xbd, 0x53, + 0x88, 0xa9, 0xf4, 0xeb, 0xd3, 0xc6, 0x5c, 0x92, 0xe8, 0x03, 0x4c, 0xb9, 0x77, 0xbf, 0x75, 0x05, + 0xea, 0xea, 0x35, 0x3e, 0x5a, 0x80, 0xda, 0x93, 0xad, 0x83, 0xce, 0x0c, 0xfb, 0x38, 0xdc, 0x3e, + 0xe8, 0x54, 0x6e, 0xdd, 0x83, 0xa5, 0xcc, 0xcb, 0x12, 0xb4, 0x0c, 0xad, 0x41, 0x7f, 0x6f, 0xfb, + 0xf3, 0xfd, 0x9f, 0x5b, 0xe6, 0x4e, 0x7f, 0xfb, 0x17, 0x9d, 0x19, 0xb4, 0x02, 0x1d, 0x05, 0xda, + 0xdb, 0x7f, 0x22, 0xa0, 0x95, 0x5b, 0xdf, 0x64, 0x6c, 0x04, 0xa3, 0x8b, 0xb0, 0xbc, 0xb5, 0xbf, + 0xf7, 0xa4, 0xbf, 0xbb, 0xb7, 0x63, 0x5a, 0x5b, 0xe6, 0x4e, 0xff, 0xc9, 0xce, 0x76, 0x67, 0x26, + 0x0d, 0x36, 0x0f, 0xf7, 0xf6, 0x76, 0xf7, 0xbe, 0xe8, 0x54, 0x18, 0xd7, 0x04, 0xbc, 0xf3, 0xf3, + 0x5d, 0x86, 0x5c, 0x4d, 0x23, 0x1f, 0xee, 0x3d, 0xdc, 0xdb, 0xff, 0xd9, 0x5e, 0xa7, 0xb6, 0xf9, + 0xdb, 0x45, 0x68, 0xab, 0x43, 0x1c, 0x07, 0xfc, 0x76, 0xf2, 0x53, 0x58, 0x50, 0x3f, 0x4a, 0x24, + 0xde, 0x23, 0xfd, 0x57, 0x47, 0xaf, 0x9b, 0xef, 0x90, 0xc1, 0xd0, 0x0c, 0x3a, 0xe0, 0xc1, 0x89, + 0xf6, 0x8a, 0xe7, 0xaa, 0x1e, 0x2e, 0xe4, 0x9e, 0x09, 0xf5, 0xd6, 0xcb, 0xba, 0x63, 0x8e, 0x03, + 0x16, 0x91, 0xe8, 0x2f, 0x30, 0xd1, 0xba, 0x7e, 0x6e, 0xe7, 0x5f, 0x76, 0xf6, 0x5e, 0x2b, 0xed, + 0x8f, 0x99, 0xfe, 0x02, 0x3a, 0xd9, 0xb7, 0x97, 0x28, 0xb9, 0x65, 0x2e, 0x79, 0xd7, 0xd9, 0x7b, + 0x7d, 0x0a, 0x86, 0xce, 0x3a, 0xf7, 0x7e, 0x71, 0xa3, 0xfc, 0x05, 0x5a, 0x8e, 0x75, 0xd9, 0xb3, + 0x36, 0xb1, 0x14, 0xe9, 0xd7, 0x37, 0x48, 0x7f, 0x35, 0x58, 0xf0, 0x0a, 0x4b, 0x5b, 0x8a, 0xe2, + 0x67, 0x3b, 0xc6, 0x0c, 0xfa, 0x0a, 0x96, 0x32, 0x17, 0x53, 0x28, 0xa1, 0x2a, 0xbe, 0x66, 0xeb, + 0x6d, 0x94, 0x23, 0xa4, 0xf5, 0xa6, 0x5f, 0x3b, 0xa5, 0xf4, 0x56, 0x70, 0x97, 0x95, 0xd2, 0x5b, + 0xe1, 0x7d, 0x15, 0x37, 
0xaf, 0xd4, 0xe5, 0x92, 0x66, 0x5e, 0x45, 0x37, 0x59, 0xbd, 0xf5, 0xb2, + 0x6e, 0x7d, 0xfa, 0x99, 0x8b, 0x25, 0x6d, 0xfa, 0xc5, 0xf7, 0x55, 0xbd, 0x8d, 0x72, 0x84, 0xac, + 0xae, 0x92, 0x2a, 0x77, 0x46, 0x57, 0xb9, 0x4b, 0x95, 0x8c, 0xae, 0xf2, 0xe5, 0x71, 0xa9, 0xab, + 0x4c, 0xb9, 0xfa, 0xb5, 0xd2, 0x72, 0x5e, 0x5e, 0x57, 0xc5, 0x15, 0x42, 0x63, 0x06, 0xf5, 0xa1, + 0xae, 0xea, 0x71, 0x28, 0xd9, 0xdd, 0x99, 0x22, 0x60, 0xef, 0x52, 0x41, 0x4f, 0xcc, 0xe2, 0x3d, + 0x98, 0x65, 0x50, 0xb4, 0x92, 0x42, 0x52, 0xa4, 0x17, 0x33, 0xd0, 0x98, 0xec, 0x23, 0x98, 0x17, + 0xe5, 0x2b, 0x94, 0xe4, 0x15, 0xa9, 0x5a, 0x59, 0x6f, 0x2d, 0x07, 0x8f, 0x89, 0xbf, 0x14, 0x3f, + 0x4f, 0xc9, 0x3a, 0x14, 0xba, 0x9c, 0x7a, 0xd6, 0x9f, 0xae, 0x76, 0xf5, 0xae, 0x14, 0x77, 0xea, + 0xfa, 0xca, 0x1c, 0xce, 0xeb, 0x65, 0xd1, 0x53, 0x4e, 0x5f, 0xc5, 0xd1, 0x98, 0x31, 0x83, 0x2c, + 0x51, 0xd2, 0xc9, 0x30, 0x36, 0x8a, 0x15, 0x9d, 0x62, 0x7e, 0x6d, 0x2a, 0x4e, 0x3c, 0xc0, 0x11, + 0x5c, 0x28, 0x48, 0x4e, 0x51, 0x42, 0x5d, 0x9e, 0x17, 0xf7, 0xde, 0x98, 0x8e, 0xa4, 0xab, 0x48, + 0xda, 0xda, 0xaa, 0xbe, 0x41, 0x35, 0x13, 0x5b, 0xcb, 0xc1, 0x15, 0xf1, 0xe6, 0x5f, 0xd4, 0x60, + 0x51, 0x94, 0x10, 0xe4, 0x01, 0xf3, 0x05, 0x40, 0x52, 0xe5, 0x42, 0xbd, 0xd4, 0x34, 0x53, 0xe5, + 0xbe, 0xde, 0xe5, 0xc2, 0x3e, 0x5d, 0xf9, 0x5a, 0xc1, 0x4a, 0x53, 0x7e, 0xbe, 0x0c, 0xa6, 0x29, + 0xbf, 0xa0, 0xc6, 0x65, 0xcc, 0xa0, 0x6d, 0x68, 0xc4, 0x55, 0x14, 0xa4, 0x15, 0x5f, 0x32, 0x25, + 0xa0, 0x5e, 0xaf, 0xa8, 0x4b, 0x97, 0x48, 0xab, 0x8c, 0x68, 0x12, 0xe5, 0xeb, 0x2d, 0x9a, 0x44, + 0x45, 0xc5, 0x94, 0x64, 0x76, 0x22, 0x11, 0xcc, 0xce, 0x2e, 0x95, 0x5b, 0x67, 0x67, 0x97, 0xce, + 0x1d, 0x8d, 0x99, 0xcf, 0xaf, 0xfc, 0xe6, 0x77, 0xeb, 0x95, 0xff, 0xfc, 0xdd, 0xfa, 0xcc, 0x9f, + 0x3f, 0x5f, 0xaf, 0xfc, 0xe6, 0xf9, 0x7a, 0xe5, 0xdf, 0x9e, 0xaf, 0x57, 0x7e, 0xfb, 0x7c, 0xbd, + 0xf2, 0xdd, 0x7f, 0xaf, 0xcf, 0x1c, 0xcd, 0xf3, 0xbf, 0x09, 0xdf, 0xfd, 0xff, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xef, 0x34, 0xd8, 0x03, 0x01, 0x3a, 0x00, 0x00, } 
diff --git a/pkg/kubelet/apis/cri/v1alpha1/runtime/api.proto b/pkg/kubelet/apis/cri/v1alpha1/runtime/api.proto index b34451e2e2e..71bfe0595c6 100644 --- a/pkg/kubelet/apis/cri/v1alpha1/runtime/api.proto +++ b/pkg/kubelet/apis/cri/v1alpha1/runtime/api.proto @@ -272,32 +272,12 @@ message PodSandboxConfig { // // In general, in order to preserve a well-defined interface between the // kubelet and the container runtime, annotations SHOULD NOT influence - // runtime behaviour. For legacy reasons, there are some annotations which - // currently explicitly break this rule, listed below; in future versions - // of the interface these will be promoted to typed features. + // runtime behaviour. // // Annotations can also be useful for runtime authors to experiment with // new features that are opaque to the Kubernetes APIs (both user-facing // and the CRI). Whenever possible, however, runtime authors SHOULD // consider proposing new typed fields for any new features instead. - // - // 1. Seccomp - // - // key: security.alpha.kubernetes.io/seccomp/pod - // description: the seccomp profile for the containers of an entire pod. - // value: see below. - // - // key: security.alpha.kubernetes.io/seccomp/container/ - // description: the seccomp profile for the container (overrides pod). - // value: see below - // - // The value of seccomp is runtime agnostic: - // * runtime/default: the default profile for the container runtime - // * unconfined: unconfined profile, ie, no seccomp sandboxing - // * localhost/: the profile installed to the node's - // local seccomp profile root. Note that profile root is set in - // kubelet, and it is not passed in CRI yet, see https://issues.k8s.io/36997. - // map annotations = 7; // Optional configurations specific to Linux hosts. LinuxPodSandboxConfig linux = 8; @@ -522,6 +502,12 @@ message LinuxContainerSecurityContext { // (localhost) by name. 
The possible profile names are detailed at // http://wiki.apparmor.net/index.php/AppArmor_Core_Policy_Reference string apparmor_profile = 9; + // Seccomp profile for the container, candidate values are: + // * runtime/default: the default profile for the container runtime + // * unconfined: unconfined profile, ie, no seccomp sandboxing + // * localhost/: the profile installed on the node. + // is the full path of the profile. + string seccomp_profile_path = 10; } // LinuxContainerConfig contains platform-specific configuration for diff --git a/pkg/kubelet/cadvisor/cadvisor_linux.go b/pkg/kubelet/cadvisor/cadvisor_linux.go index f0392063e09..cdbe96a27e4 100644 --- a/pkg/kubelet/cadvisor/cadvisor_linux.go +++ b/pkg/kubelet/cadvisor/cadvisor_linux.go @@ -225,3 +225,16 @@ func (cc *cadvisorClient) getFsInfo(label string) (cadvisorapiv2.FsInfo, error) func (cc *cadvisorClient) WatchEvents(request *events.Request) (*events.EventChannel, error) { return cc.WatchForEvents(request) } + +// HasDedicatedImageFs returns true if the imagefs has a dedicated device. 
+func (cc *cadvisorClient) HasDedicatedImageFs() (bool, error) { + imageFsInfo, err := cc.ImagesFsInfo() + if err != nil { + return false, err + } + rootFsInfo, err := cc.RootFsInfo() + if err != nil { + return false, err + } + return imageFsInfo.Device != rootFsInfo.Device, nil +} diff --git a/pkg/kubelet/cadvisor/cadvisor_unsupported.go b/pkg/kubelet/cadvisor/cadvisor_unsupported.go index 23378242775..788629c3392 100644 --- a/pkg/kubelet/cadvisor/cadvisor_unsupported.go +++ b/pkg/kubelet/cadvisor/cadvisor_unsupported.go @@ -76,3 +76,7 @@ func (cu *cadvisorUnsupported) RootFsInfo() (cadvisorapiv2.FsInfo, error) { func (cu *cadvisorUnsupported) WatchEvents(request *events.Request) (*events.EventChannel, error) { return nil, unsupportedErr } + +func (cu *cadvisorUnsupported) HasDedicatedImageFs() (bool, error) { + return false, unsupportedErr +} diff --git a/pkg/kubelet/cadvisor/cadvisor_windows.go b/pkg/kubelet/cadvisor/cadvisor_windows.go index 832208658fc..f7e265ec678 100644 --- a/pkg/kubelet/cadvisor/cadvisor_windows.go +++ b/pkg/kubelet/cadvisor/cadvisor_windows.go @@ -73,3 +73,7 @@ func (cu *cadvisorClient) RootFsInfo() (cadvisorapiv2.FsInfo, error) { func (cu *cadvisorClient) WatchEvents(request *events.Request) (*events.EventChannel, error) { return &events.EventChannel{}, nil } + +func (cu *cadvisorClient) HasDedicatedImageFs() (bool, error) { + return false, nil +} diff --git a/pkg/kubelet/cadvisor/testing/cadvisor_fake.go b/pkg/kubelet/cadvisor/testing/cadvisor_fake.go index 894c6f0c976..64566712504 100644 --- a/pkg/kubelet/cadvisor/testing/cadvisor_fake.go +++ b/pkg/kubelet/cadvisor/testing/cadvisor_fake.go @@ -73,3 +73,7 @@ func (c *Fake) RootFsInfo() (cadvisorapiv2.FsInfo, error) { func (c *Fake) WatchEvents(request *events.Request) (*events.EventChannel, error) { return new(events.EventChannel), nil } + +func (c *Fake) HasDedicatedImageFs() (bool, error) { + return false, nil +} diff --git a/pkg/kubelet/cadvisor/testing/cadvisor_mock.go 
b/pkg/kubelet/cadvisor/testing/cadvisor_mock.go index a6ba72c8784..7848039178a 100644 --- a/pkg/kubelet/cadvisor/testing/cadvisor_mock.go +++ b/pkg/kubelet/cadvisor/testing/cadvisor_mock.go @@ -83,3 +83,8 @@ func (c *Mock) WatchEvents(request *events.Request) (*events.EventChannel, error args := c.Called() return args.Get(0).(*events.EventChannel), args.Error(1) } + +func (c *Mock) HasDedicatedImageFs() (bool, error) { + args := c.Called() + return args.Get(0).(bool), args.Error(1) +} diff --git a/pkg/kubelet/cadvisor/types.go b/pkg/kubelet/cadvisor/types.go index de7d334c1b7..2a97ba35237 100644 --- a/pkg/kubelet/cadvisor/types.go +++ b/pkg/kubelet/cadvisor/types.go @@ -41,4 +41,7 @@ type Interface interface { // Get events streamed through passedChannel that fit the request. WatchEvents(request *events.Request) (*events.EventChannel, error) + + // HasDedicatedImageFs returns true iff a dedicated image filesystem exists for storing images. + HasDedicatedImageFs() (bool, error) } diff --git a/pkg/kubelet/certificate/BUILD b/pkg/kubelet/certificate/BUILD index c2d62856b98..48dc8fc8fb2 100644 --- a/pkg/kubelet/certificate/BUILD +++ b/pkg/kubelet/certificate/BUILD @@ -13,15 +13,19 @@ go_library( srcs = [ "certificate_manager.go", "certificate_store.go", + "kubelet.go", ], tags = ["automanaged"], deps = [ + "//pkg/apis/componentconfig:go_default_library", + "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/typed/certificates/v1beta1:go_default_library", "//pkg/util:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/util/cert:go_default_library", diff --git a/pkg/kubelet/certificate/kubelet.go b/pkg/kubelet/certificate/kubelet.go new file mode 100644 index 00000000000..a0a76b52df1 --- /dev/null +++ b/pkg/kubelet/certificate/kubelet.go @@ -0,0 +1,124 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certificate + +import ( + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "net" + + certificates "k8s.io/api/certificates/v1beta1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/apis/componentconfig" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + clientcertificates "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1" +) + +// NewKubeletServerCertificateManager creates a certificate manager for the kubelet when retrieving a server certificate +// or returns an error. 
+func NewKubeletServerCertificateManager(kubeClient clientset.Interface, kubeCfg *componentconfig.KubeletConfiguration, nodeName types.NodeName, ips []net.IP, hostnames []string) (Manager, error) { + var certSigningRequestClient clientcertificates.CertificateSigningRequestInterface + if kubeClient != nil && kubeClient.Certificates() != nil { + certSigningRequestClient = kubeClient.Certificates().CertificateSigningRequests() + } + certificateStore, err := NewFileStore( + "kubelet-server", + kubeCfg.CertDirectory, + kubeCfg.CertDirectory, + kubeCfg.TLSCertFile, + kubeCfg.TLSPrivateKeyFile) + if err != nil { + return nil, fmt.Errorf("failed to initialize server certificate store: %v", err) + } + m, err := NewManager(&Config{ + CertificateSigningRequestClient: certSigningRequestClient, + Template: &x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: fmt.Sprintf("system:node:%s", nodeName), + Organization: []string{"system:nodes"}, + }, + DNSNames: hostnames, + IPAddresses: ips, + }, + Usages: []certificates.KeyUsage{ + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // + // Digital signature allows the certificate to be used to verify + // digital signatures used during TLS negotiation. + certificates.UsageDigitalSignature, + // KeyEncipherment allows the cert/key pair to be used to encrypt + // keys, including the symetric keys negotiated during TLS setup + // and used for data transfer. + certificates.UsageKeyEncipherment, + // ServerAuth allows the cert to be used by a TLS server to + // authenticate itself to a TLS client. + certificates.UsageServerAuth, + }, + CertificateStore: certificateStore, + }) + if err != nil { + return nil, fmt.Errorf("failed to initialize server certificate manager: %v", err) + } + return m, nil +} + +// NewKubeletClientCertificateManager sets up a certificate manager without a +// client that can be used to sign new certificates (or rotate). It answers with +// whatever certificate it is initialized with. 
If a CSR client is set later, it +// may begin rotating/renewing the client cert +func NewKubeletClientCertificateManager(certDirectory string, nodeName types.NodeName, certData []byte, keyData []byte, certFile string, keyFile string) (Manager, error) { + certificateStore, err := NewFileStore( + "kubelet-client", + certDirectory, + certDirectory, + certFile, + keyFile) + if err != nil { + return nil, fmt.Errorf("failed to initialize client certificate store: %v", err) + } + m, err := NewManager(&Config{ + Template: &x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: fmt.Sprintf("system:node:%s", nodeName), + Organization: []string{"system:nodes"}, + }, + }, + Usages: []certificates.KeyUsage{ + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // + // DigitalSignature allows the certificate to be used to verify + // digital signatures including signatures used during TLS + // negotiation. + certificates.UsageDigitalSignature, + // KeyEncipherment allows the cert/key pair to be used to encrypt + // keys, including the symetric keys negotiated during TLS setup + // and used for data transfer.. + certificates.UsageKeyEncipherment, + // ClientAuth allows the cert to be used by a TLS client to + // authenticate itself to the TLS server. 
+ certificates.UsageClientAuth, + }, + CertificateStore: certificateStore, + BootstrapCertificatePEM: certData, + BootstrapKeyPEM: keyData, + }) + if err != nil { + return nil, fmt.Errorf("failed to initialize client certificate manager: %v", err) + } + return m, nil +} diff --git a/pkg/kubelet/cm/BUILD b/pkg/kubelet/cm/BUILD index 39317309f9f..b78b05d5d3a 100644 --- a/pkg/kubelet/cm/BUILD +++ b/pkg/kubelet/cm/BUILD @@ -40,6 +40,7 @@ go_library( "//pkg/util/sysctl:go_default_library", "//pkg/util/version:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/google/cadvisor/info/v2:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd:go_default_library", diff --git a/pkg/kubelet/cm/container_manager.go b/pkg/kubelet/cm/container_manager.go index 467a97ca045..77e5dd187e4 100644 --- a/pkg/kubelet/cm/container_manager.go +++ b/pkg/kubelet/cm/container_manager.go @@ -60,6 +60,9 @@ type ContainerManager interface { // GetNodeAllocatable returns the amount of compute resources that have to be reserved from scheduling. GetNodeAllocatableReservation() v1.ResourceList + // GetCapacity returns the amount of compute resources tracked by container manager available on the node. 
+ GetCapacity() v1.ResourceList + // UpdateQOSCgroups performs housekeeping updates to ensure that the top // level QoS containers have their desired state in a thread-safe way UpdateQOSCgroups() error diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index 8a144c4c03a..0e3187757f5 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -30,6 +30,7 @@ import ( "time" "github.com/golang/glog" + cadvisorapiv2 "github.com/google/cadvisor/info/v2" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fs" "github.com/opencontainers/runc/libcontainer/configs" @@ -219,6 +220,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I var capacity = v1.ResourceList{} // It is safe to invoke `MachineInfo` on cAdvisor before logically initializing cAdvisor here because // machine info is computed and cached once as part of cAdvisor object creation. + // But `RootFsInfo` and `ImagesFsInfo` are not available at this moment so they will be called later during manager starts if info, err := cadvisorInterface.MachineInfo(); err == nil { capacity = cadvisor.CapacityFromMachineInfo(info) } else { @@ -532,6 +534,44 @@ func (cm *containerManagerImpl) Start(node *v1.Node, activePods ActivePodsFunc) }, 5*time.Minute, wait.NeverStop) } + // Local storage filesystem information from `RootFsInfo` and `ImagesFsInfo` is available at a later time + // depending on the time when cadvisor manager updates container stats. Therefore use a go routine to keep + // retrieving the information until it is available. 
+ stopChan := make(chan struct{}) + go wait.Until(func() { + if err := cm.setFsCapacity(); err != nil { + glog.Errorf("[ContainerManager]: %v", err) + return + } + close(stopChan) + }, time.Second, stopChan) + return nil +} + +func (cm *containerManagerImpl) setFsCapacity() error { + rootfs, err := cm.cadvisorInterface.RootFsInfo() + if err != nil { + return fmt.Errorf("Fail to get rootfs information %v", err) + } + hasDedicatedImageFs, _ := cm.cadvisorInterface.HasDedicatedImageFs() + var imagesfs cadvisorapiv2.FsInfo + if hasDedicatedImageFs { + imagesfs, err = cm.cadvisorInterface.ImagesFsInfo() + if err != nil { + return fmt.Errorf("Fail to get imagefs information %v", err) + } + } + + cm.Lock() + for rName, rCap := range cadvisor.StorageScratchCapacityFromFsInfo(rootfs) { + cm.capacity[rName] = rCap + } + if hasDedicatedImageFs { + for rName, rCap := range cadvisor.StorageOverlayCapacityFromFsInfo(imagesfs) { + cm.capacity[rName] = rCap + } + } + cm.Unlock() return nil } @@ -790,6 +830,8 @@ func getDockerAPIVersion(cadvisor cadvisor.Interface) *utilversion.Version { return dockerAPIVersion } -func (m *containerManagerImpl) GetCapacity() v1.ResourceList { - return m.capacity +func (cm *containerManagerImpl) GetCapacity() v1.ResourceList { + cm.RLock() + defer cm.RUnlock() + return cm.capacity } diff --git a/pkg/kubelet/cm/container_manager_linux_test.go b/pkg/kubelet/cm/container_manager_linux_test.go index 12219d0b9b9..d1cba37ae47 100644 --- a/pkg/kubelet/cm/container_manager_linux_test.go +++ b/pkg/kubelet/cm/container_manager_linux_test.go @@ -47,6 +47,14 @@ func (mi *fakeMountInterface) List() ([]mount.MountPoint, error) { return mi.mountPoints, nil } +func (mi *fakeMountInterface) IsMountPointMatch(mp mount.MountPoint, dir string) bool { + return (mp.Path == dir) +} + +func (mi *fakeMountInterface) IsNotMountPoint(dir string) (bool, error) { + return false, fmt.Errorf("unsupported") +} + func (mi *fakeMountInterface) IsLikelyNotMountPoint(file string) 
(bool, error) { return false, fmt.Errorf("unsupported") } diff --git a/pkg/kubelet/cm/container_manager_stub.go b/pkg/kubelet/cm/container_manager_stub.go index caa9c611d5a..941913aceee 100644 --- a/pkg/kubelet/cm/container_manager_stub.go +++ b/pkg/kubelet/cm/container_manager_stub.go @@ -58,6 +58,10 @@ func (cm *containerManagerStub) GetNodeAllocatableReservation() v1.ResourceList return nil } +func (cm *containerManagerStub) GetCapacity() v1.ResourceList { + return nil +} + func (cm *containerManagerStub) NewPodContainerManager() PodContainerManager { return &podContainerManagerStub{} } diff --git a/pkg/kubelet/cm/container_manager_unsupported.go b/pkg/kubelet/cm/container_manager_unsupported.go index 5a1a2f76d6d..a9984b9e25d 100644 --- a/pkg/kubelet/cm/container_manager_unsupported.go +++ b/pkg/kubelet/cm/container_manager_unsupported.go @@ -64,6 +64,10 @@ func (cm *unsupportedContainerManager) GetNodeAllocatableReservation() v1.Resour return nil } +func (cm *unsupportedContainerManager) GetCapacity() v1.ResourceList { + return nil +} + func (cm *unsupportedContainerManager) NewPodContainerManager() PodContainerManager { return &unsupportedPodContainerManager{} } diff --git a/pkg/kubelet/cm/container_manager_unsupported_test.go b/pkg/kubelet/cm/container_manager_unsupported_test.go index 11d70260e9d..d1451946fc0 100644 --- a/pkg/kubelet/cm/container_manager_unsupported_test.go +++ b/pkg/kubelet/cm/container_manager_unsupported_test.go @@ -40,6 +40,14 @@ func (mi *fakeMountInterface) List() ([]mount.MountPoint, error) { return mi.mountPoints, nil } +func (f *fakeMountInterface) IsMountPointMatch(mp mount.MountPoint, dir string) bool { + return (mp.Path == dir) +} + +func (f *fakeMountInterface) IsNotMountPoint(dir string) (bool, error) { + return false, fmt.Errorf("unsupported") +} + func (mi *fakeMountInterface) IsLikelyNotMountPoint(file string) (bool, error) { return false, fmt.Errorf("unsupported") } diff --git a/pkg/kubelet/cm/helpers_linux.go 
b/pkg/kubelet/cm/helpers_linux.go index 0d415728058..39f445a9431 100644 --- a/pkg/kubelet/cm/helpers_linux.go +++ b/pkg/kubelet/cm/helpers_linux.go @@ -86,10 +86,7 @@ func MilliCPUToShares(milliCPU int64) int64 { // ResourceConfigForPod takes the input pod and outputs the cgroup resource config. func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig { // sum requests and limits. - reqs, limits, err := resource.PodRequestsAndLimits(pod) - if err != nil { - return &ResourceConfig{} - } + reqs, limits := resource.PodRequestsAndLimits(pod) cpuRequests := int64(0) cpuLimits := int64(0) diff --git a/pkg/kubelet/cm/node_container_manager.go b/pkg/kubelet/cm/node_container_manager.go index 11f321ed8c3..bb6359f2d4e 100644 --- a/pkg/kubelet/cm/node_container_manager.go +++ b/pkg/kubelet/cm/node_container_manager.go @@ -29,7 +29,6 @@ import ( clientv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/events" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" ) @@ -184,17 +183,6 @@ func (cm *containerManagerImpl) getNodeAllocatableAbsolute() v1.ResourceList { // GetNodeAllocatable returns amount of compute or storage resource that have to be reserved on this node from scheduling. 
func (cm *containerManagerImpl) GetNodeAllocatableReservation() v1.ResourceList { evictionReservation := hardEvictionReservation(cm.HardEvictionThresholds, cm.capacity) - if _, ok := cm.capacity[v1.ResourceStorage]; !ok { - if cm.cadvisorInterface != nil { - if rootfs, err := cm.cadvisorInterface.RootFsInfo(); err == nil { - for rName, rCap := range cadvisor.StorageScratchCapacityFromFsInfo(rootfs) { - cm.capacity[rName] = rCap - } - } else { - glog.Warning("Error getting rootfs info: %v", err) - } - } - } result := make(v1.ResourceList) for k := range cm.capacity { value := resource.NewQuantity(0, resource.DecimalSI) diff --git a/pkg/kubelet/cm/qos_container_manager_linux.go b/pkg/kubelet/cm/qos_container_manager_linux.go index 80aebbc1912..aeb92f5a0ed 100644 --- a/pkg/kubelet/cm/qos_container_manager_linux.go +++ b/pkg/kubelet/cm/qos_container_manager_linux.go @@ -148,10 +148,7 @@ func (m *qosContainerManagerImpl) setCPUCgroupConfig(configs map[v1.PodQOSClass] // we only care about the burstable qos tier continue } - req, _, err := resource.PodRequestsAndLimits(pod) - if err != nil { - return err - } + req, _ := resource.PodRequestsAndLimits(pod) if request, found := req[v1.ResourceCPU]; found { burstablePodCPURequest += request.MilliValue() } @@ -188,11 +185,7 @@ func (m *qosContainerManagerImpl) setMemoryReserve(configs map[v1.PodQOSClass]*C // limits are not set for Best Effort pods continue } - req, _, err := resource.PodRequestsAndLimits(pod) - if err != nil { - glog.V(2).Infof("[Container Manager] Pod resource requests/limits could not be determined. 
Not setting QOS memory limts.") - return - } + req, _ := resource.PodRequestsAndLimits(pod) if request, found := req[v1.ResourceMemory]; found { podMemoryRequest += request.Value() } diff --git a/pkg/kubelet/config/common.go b/pkg/kubelet/config/common.go index 52fce69d3fa..283e37470a5 100644 --- a/pkg/kubelet/config/common.go +++ b/pkg/kubelet/config/common.go @@ -110,12 +110,14 @@ func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *v if err != nil { return false, pod, err } + + newPod, ok := obj.(*api.Pod) // Check whether the object could be converted to single pod. - if _, ok := obj.(*api.Pod); !ok { + if !ok { err = fmt.Errorf("invalid pod: %#v", obj) return false, pod, err } - newPod := obj.(*api.Pod) + // Apply default values and validate the pod. if err = defaultFn(newPod); err != nil { return true, pod, err @@ -136,12 +138,14 @@ func tryDecodePodList(data []byte, defaultFn defaultFunc) (parsed bool, pods v1. if err != nil { return false, pods, err } + + newPods, ok := obj.(*api.PodList) // Check whether the object could be converted to list of pods. - if _, ok := obj.(*api.PodList); !ok { + if !ok { err = fmt.Errorf("invalid pods list: %#v", obj) return false, pods, err } - newPods := obj.(*api.PodList) + // Apply default values and validate pods. 
for i := range newPods.Items { newPod := &newPods.Items[i] diff --git a/pkg/kubelet/config/file.go b/pkg/kubelet/config/file.go index 01e569f532a..7d58d190b7a 100644 --- a/pkg/kubelet/config/file.go +++ b/pkg/kubelet/config/file.go @@ -23,6 +23,7 @@ import ( "os" "path/filepath" "sort" + "strings" "time" "github.com/golang/glog" @@ -44,6 +45,9 @@ type sourceFile struct { } func NewSourceFile(path string, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) { + // "golang.org/x/exp/inotify" requires a path without trailing "/" + path = strings.TrimRight(path, string(os.PathSeparator)) + config := new(path, nodeName, period, updates) glog.V(1).Infof("Watching path %q", path) go wait.Forever(config.run, period) diff --git a/pkg/kubelet/container/container_reference_manager.go b/pkg/kubelet/container/container_reference_manager.go index a3e574b7f81..d41d05a9332 100644 --- a/pkg/kubelet/container/container_reference_manager.go +++ b/pkg/kubelet/container/container_reference_manager.go @@ -38,7 +38,6 @@ func NewRefManager() *RefManager { } // SetRef stores a reference to a pod's container, associating it with the given container ID. -// TODO: move this to client-go v1.ObjectReference func (c *RefManager) SetRef(id ContainerID, ref *v1.ObjectReference) { c.Lock() defer c.Unlock() @@ -53,7 +52,6 @@ func (c *RefManager) ClearRef(id ContainerID) { } // GetRef returns the container reference of the given ID, or (nil, false) if none is stored. 
-// TODO: move this to client-go v1.ObjectReference func (c *RefManager) GetRef(id ContainerID) (ref *v1.ObjectReference, ok bool) { c.RLock() defer c.RUnlock() diff --git a/pkg/kubelet/dockershim/BUILD b/pkg/kubelet/dockershim/BUILD index 915b6b8485d..ddf32974059 100644 --- a/pkg/kubelet/dockershim/BUILD +++ b/pkg/kubelet/dockershim/BUILD @@ -27,6 +27,7 @@ go_library( "helpers_linux.go", "naming.go", "security_context.go", + "selinux_util.go", ], tags = ["automanaged"], deps = [ @@ -39,7 +40,6 @@ go_library( "//pkg/kubelet/dockershim/cm:go_default_library", "//pkg/kubelet/dockershim/errors:go_default_library", "//pkg/kubelet/dockershim/libdocker:go_default_library", - "//pkg/kubelet/dockershim/securitycontext:go_default_library", "//pkg/kubelet/leaky:go_default_library", "//pkg/kubelet/network:go_default_library", "//pkg/kubelet/network/cni:go_default_library", @@ -55,11 +55,11 @@ go_library( "//pkg/util/hash:go_default_library", "//pkg/util/term:go_default_library", "//vendor/github.com/blang/semver:go_default_library", + "//vendor/github.com/docker/docker/api/types:go_default_library", + "//vendor/github.com/docker/docker/api/types/container:go_default_library", + "//vendor/github.com/docker/docker/api/types/filters:go_default_library", + "//vendor/github.com/docker/docker/api/types/strslice:go_default_library", "//vendor/github.com/docker/docker/pkg/jsonmessage:go_default_library", - "//vendor/github.com/docker/engine-api/types:go_default_library", - "//vendor/github.com/docker/engine-api/types/container:go_default_library", - "//vendor/github.com/docker/engine-api/types/filters:go_default_library", - "//vendor/github.com/docker/engine-api/types/strslice:go_default_library", "//vendor/github.com/docker/go-connections/nat:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -86,6 +86,7 @@ go_test( "helpers_test.go", "naming_test.go", "security_context_test.go", + "selinux_util_test.go", 
], data = [ "fixtures/seccomp/sub/subtest", @@ -99,7 +100,6 @@ go_test( "//pkg/kubelet/container/testing:go_default_library", "//pkg/kubelet/dockershim/errors:go_default_library", "//pkg/kubelet/dockershim/libdocker:go_default_library", - "//pkg/kubelet/dockershim/securitycontext:go_default_library", "//pkg/kubelet/dockershim/testing:go_default_library", "//pkg/kubelet/network:go_default_library", "//pkg/kubelet/network/testing:go_default_library", @@ -107,9 +107,9 @@ go_test( "//pkg/kubelet/util/cache:go_default_library", "//pkg/security/apparmor:go_default_library", "//vendor/github.com/blang/semver:go_default_library", + "//vendor/github.com/docker/docker/api/types:go_default_library", + "//vendor/github.com/docker/docker/api/types/container:go_default_library", "//vendor/github.com/docker/docker/pkg/jsonmessage:go_default_library", - "//vendor/github.com/docker/engine-api/types:go_default_library", - "//vendor/github.com/docker/engine-api/types/container:go_default_library", "//vendor/github.com/docker/go-connections/nat:go_default_library", "//vendor/github.com/golang/mock/gomock:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", @@ -134,7 +134,6 @@ filegroup( "//pkg/kubelet/dockershim/errors:all-srcs", "//pkg/kubelet/dockershim/libdocker:all-srcs", "//pkg/kubelet/dockershim/remote:all-srcs", - "//pkg/kubelet/dockershim/securitycontext:all-srcs", "//pkg/kubelet/dockershim/testing:all-srcs", ], tags = ["automanaged"], diff --git a/pkg/kubelet/dockershim/convert.go b/pkg/kubelet/dockershim/convert.go index 0789301839d..0ce30959a74 100644 --- a/pkg/kubelet/dockershim/convert.go +++ b/pkg/kubelet/dockershim/convert.go @@ -21,7 +21,7 @@ import ( "strings" "time" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" @@ -30,7 +30,7 @@ import ( // This file contains 
helper functions to convert docker API types to runtime // API types, or vice versa. -func imageToRuntimeAPIImage(image *dockertypes.Image) (*runtimeapi.Image, error) { +func imageToRuntimeAPIImage(image *dockertypes.ImageSummary) (*runtimeapi.Image, error) { if image == nil { return nil, fmt.Errorf("unable to convert a nil pointer to a runtime API image") } diff --git a/pkg/kubelet/dockershim/convert_test.go b/pkg/kubelet/dockershim/convert_test.go index a1b4e4cca80..a05ab9dd915 100644 --- a/pkg/kubelet/dockershim/convert_test.go +++ b/pkg/kubelet/dockershim/convert_test.go @@ -19,7 +19,7 @@ package dockershim import ( "testing" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" "github.com/stretchr/testify/assert" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" diff --git a/pkg/kubelet/dockershim/docker_checkpoint.go b/pkg/kubelet/dockershim/docker_checkpoint.go index 6d0136c56fa..6ad1d794169 100644 --- a/pkg/kubelet/dockershim/docker_checkpoint.go +++ b/pkg/kubelet/dockershim/docker_checkpoint.go @@ -50,6 +50,7 @@ type PortMapping struct { // CheckpointData contains all types of data that can be stored in the checkpoint. 
type CheckpointData struct { PortMappings []*PortMapping `json:"port_mappings,omitempty"` + HostNetwork bool `json:"host_network,omitempty"` } // PodSandboxCheckpoint is the checkpoint structure for a sandbox diff --git a/pkg/kubelet/dockershim/docker_checkpoint_test.go b/pkg/kubelet/dockershim/docker_checkpoint_test.go index 477c9204d90..c10b8f1e502 100644 --- a/pkg/kubelet/dockershim/docker_checkpoint_test.go +++ b/pkg/kubelet/dockershim/docker_checkpoint_test.go @@ -48,18 +48,22 @@ func TestPersistentCheckpointHandler(t *testing.T) { &port443, }, } + checkpoint1.Data.HostNetwork = true checkpoints := []struct { - podSandboxID string - checkpoint *PodSandboxCheckpoint + podSandboxID string + checkpoint *PodSandboxCheckpoint + expectHostNetwork bool }{ { "id1", checkpoint1, + true, }, { "id2", NewPodSandboxCheckpoint("ns2", "sandbox2"), + false, }, } @@ -72,6 +76,7 @@ func TestPersistentCheckpointHandler(t *testing.T) { checkpoint, err := handler.GetCheckpoint(tc.podSandboxID) assert.NoError(t, err) assert.Equal(t, *checkpoint, *tc.checkpoint) + assert.Equal(t, checkpoint.Data.HostNetwork, tc.expectHostNetwork) } // Test ListCheckpoints keys, err := handler.ListCheckpoints() diff --git a/pkg/kubelet/dockershim/docker_container.go b/pkg/kubelet/dockershim/docker_container.go index ac79f3fe64a..5ee0d8e539d 100644 --- a/pkg/kubelet/dockershim/docker_container.go +++ b/pkg/kubelet/dockershim/docker_container.go @@ -22,10 +22,10 @@ import ( "path/filepath" "time" - dockertypes "github.com/docker/engine-api/types" - dockercontainer "github.com/docker/engine-api/types/container" - dockerfilters "github.com/docker/engine-api/types/filters" - dockerstrslice "github.com/docker/engine-api/types/strslice" + dockertypes "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + dockerfilters "github.com/docker/docker/api/types/filters" + dockerstrslice "github.com/docker/docker/api/types/strslice" "github.com/golang/glog" runtimeapi 
"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" @@ -36,8 +36,8 @@ import ( func (ds *dockerService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) { opts := dockertypes.ContainerListOptions{All: true} - opts.Filter = dockerfilters.NewArgs() - f := newDockerFilter(&opts.Filter) + opts.Filters = dockerfilters.NewArgs() + f := newDockerFilter(&opts.Filters) // Add filter to get *only* (non-sandbox) containers. f.AddLabel(containerTypeLabelKey, containerTypeLabelContainer) @@ -249,7 +249,7 @@ func (ds *dockerService) StartContainer(containerID string) error { // StopContainer stops a running container with a grace period (i.e., timeout). func (ds *dockerService) StopContainer(containerID string, timeout int64) error { - return ds.client.StopContainer(containerID, int(timeout)) + return ds.client.StopContainer(containerID, time.Duration(timeout)*time.Second) } // RemoveContainer removes the container. diff --git a/pkg/kubelet/dockershim/docker_container_test.go b/pkg/kubelet/dockershim/docker_container_test.go index 6a1bb97005e..a8b70925b62 100644 --- a/pkg/kubelet/dockershim/docker_container_test.go +++ b/pkg/kubelet/dockershim/docker_container_test.go @@ -23,7 +23,7 @@ import ( "testing" "time" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -131,7 +131,7 @@ func TestContainerStatus(t *testing.T) { Annotations: config.Annotations, } - fDocker.InjectImages([]dockertypes.Image{{ID: imageName}}) + fDocker.InjectImages([]dockertypes.ImageSummary{{ID: imageName}}) // Create the container. 
fClock.SetTime(time.Now().Add(-1 * time.Hour)) diff --git a/pkg/kubelet/dockershim/docker_image.go b/pkg/kubelet/dockershim/docker_image.go index a2eb8dce158..5ad5c1baf30 100644 --- a/pkg/kubelet/dockershim/docker_image.go +++ b/pkg/kubelet/dockershim/docker_image.go @@ -20,8 +20,8 @@ import ( "fmt" "net/http" + dockertypes "github.com/docker/docker/api/types" "github.com/docker/docker/pkg/jsonmessage" - dockertypes "github.com/docker/engine-api/types" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" @@ -34,7 +34,7 @@ func (ds *dockerService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimea opts := dockertypes.ImageListOptions{} if filter != nil { if imgSpec := filter.GetImage(); imgSpec != nil { - opts.MatchName = imgSpec.Image + opts.Filters.Add("reference", imgSpec.Image) } } diff --git a/pkg/kubelet/dockershim/docker_image_test.go b/pkg/kubelet/dockershim/docker_image_test.go index 4d6d37f4166..d9e3c853244 100644 --- a/pkg/kubelet/dockershim/docker_image_test.go +++ b/pkg/kubelet/dockershim/docker_image_test.go @@ -20,8 +20,8 @@ import ( "fmt" "testing" + dockertypes "github.com/docker/docker/api/types" "github.com/docker/docker/pkg/jsonmessage" - dockertypes "github.com/docker/engine-api/types" "github.com/stretchr/testify/assert" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" diff --git a/pkg/kubelet/dockershim/docker_legacy.go b/pkg/kubelet/dockershim/docker_legacy.go index d5d26be47d2..24ebbe3d98b 100644 --- a/pkg/kubelet/dockershim/docker_legacy.go +++ b/pkg/kubelet/dockershim/docker_legacy.go @@ -22,8 +22,8 @@ import ( "sync/atomic" "time" - dockertypes "github.com/docker/engine-api/types" - dockerfilters "github.com/docker/engine-api/types/filters" + dockertypes "github.com/docker/docker/api/types" + dockerfilters "github.com/docker/docker/api/types/filters" "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/sets" @@ -155,9 +155,9 @@ func (ds 
*dockerService) checkLegacyCleanup() (bool, error) { // ListLegacyPodSandbox only lists all legacy pod sandboxes. func (ds *dockerService) ListLegacyPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) { // By default, list all containers whether they are running or not. - opts := dockertypes.ContainerListOptions{All: true, Filter: dockerfilters.NewArgs()} + opts := dockertypes.ContainerListOptions{All: true, Filters: dockerfilters.NewArgs()} filterOutReadySandboxes := false - f := newDockerFilter(&opts.Filter) + f := newDockerFilter(&opts.Filters) if filter != nil { if filter.Id != "" { f.Add("id", filter.Id) @@ -218,8 +218,8 @@ func (ds *dockerService) ListLegacyPodSandbox(filter *runtimeapi.PodSandboxFilte // ListLegacyPodSandbox only lists all legacy containers. func (ds *dockerService) ListLegacyContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) { - opts := dockertypes.ContainerListOptions{All: true, Filter: dockerfilters.NewArgs()} - f := newDockerFilter(&opts.Filter) + opts := dockertypes.ContainerListOptions{All: true, Filters: dockerfilters.NewArgs()} + f := newDockerFilter(&opts.Filters) if filter != nil { if filter.Id != "" { diff --git a/pkg/kubelet/dockershim/docker_legacy_test.go b/pkg/kubelet/dockershim/docker_legacy_test.go index 0b4551b60ba..cc56d47d88d 100644 --- a/pkg/kubelet/dockershim/docker_legacy_test.go +++ b/pkg/kubelet/dockershim/docker_legacy_test.go @@ -19,7 +19,7 @@ package dockershim import ( "testing" - dockercontainer "github.com/docker/engine-api/types/container" + dockercontainer "github.com/docker/docker/api/types/container" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/pkg/kubelet/dockershim/docker_sandbox.go b/pkg/kubelet/dockershim/docker_sandbox.go index 70c5bcb0ded..336f13a7409 100644 --- a/pkg/kubelet/dockershim/docker_sandbox.go +++ b/pkg/kubelet/dockershim/docker_sandbox.go @@ -20,10 +20,11 @@ import ( "fmt" "os" "strings" + 
"time" - dockertypes "github.com/docker/engine-api/types" - dockercontainer "github.com/docker/engine-api/types/container" - dockerfilters "github.com/docker/engine-api/types/filters" + dockertypes "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + dockerfilters "github.com/docker/docker/api/types/filters" "github.com/golang/glog" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -41,13 +42,15 @@ const ( // Various default sandbox resources requests/limits. defaultSandboxCPUshares int64 = 2 - // Termination grace period - defaultSandboxGracePeriod int = 10 - // Name of the underlying container runtime runtimeName = "docker" ) +var ( + // Termination grace period + defaultSandboxGracePeriod = time.Duration(10) * time.Second +) + // Returns whether the sandbox network is ready, and whether the sandbox is known func (ds *dockerService) getNetworkReady(podSandboxID string) (bool, bool) { ds.networkReadyLock.Lock() @@ -171,14 +174,14 @@ func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (id // after us? 
func (ds *dockerService) StopPodSandbox(podSandboxID string) error { var namespace, name string + var hostNetwork bool var checkpointErr, statusErr error - needNetworkTearDown := false // Try to retrieve sandbox information from docker daemon or sandbox checkpoint status, statusErr := ds.PodSandboxStatus(podSandboxID) if statusErr == nil { nsOpts := status.GetLinux().GetNamespaces().GetOptions() - needNetworkTearDown = nsOpts != nil && !nsOpts.HostNetwork + hostNetwork = nsOpts != nil && nsOpts.HostNetwork m := status.GetMetadata() namespace = m.Namespace name = m.Name @@ -211,10 +214,8 @@ func (ds *dockerService) StopPodSandbox(podSandboxID string) error { } else { namespace = checkpoint.Namespace name = checkpoint.Name + hostNetwork = checkpoint.Data != nil && checkpoint.Data.HostNetwork } - - // Always trigger network plugin to tear down - needNetworkTearDown = true } // WARNING: The following operations made the following assumption: @@ -226,7 +227,7 @@ func (ds *dockerService) StopPodSandbox(podSandboxID string) error { // since it is stopped. With empty network namespcae, CNI bridge plugin will conduct best // effort clean up and will not return error. 
errList := []error{} - if needNetworkTearDown { + if !hostNetwork { cID := kubecontainer.BuildContainerID(runtimeName, podSandboxID) err := ds.network.TearDownPod(namespace, name, cID) if err == nil { @@ -252,8 +253,8 @@ func (ds *dockerService) RemovePodSandbox(podSandboxID string) error { var errs []error opts := dockertypes.ContainerListOptions{All: true} - opts.Filter = dockerfilters.NewArgs() - f := newDockerFilter(&opts.Filter) + opts.Filters = dockerfilters.NewArgs() + f := newDockerFilter(&opts.Filters) f.AddLabel(sandboxIDLabelKey, podSandboxID) containers, err := ds.client.ListContainers(opts) @@ -416,8 +417,8 @@ func (ds *dockerService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([] opts := dockertypes.ContainerListOptions{All: true} filterOutReadySandboxes := false - opts.Filter = dockerfilters.NewArgs() - f := newDockerFilter(&opts.Filter) + opts.Filters = dockerfilters.NewArgs() + f := newDockerFilter(&opts.Filters) // Add filter to select only sandbox containers. f.AddLabel(containerTypeLabelKey, containerTypeLabelSandbox) @@ -642,6 +643,9 @@ func constructPodSandboxCheckpoint(config *runtimeapi.PodSandboxConfig) *PodSand Protocol: &proto, }) } + if nsOptions := config.GetLinux().GetSecurityContext().GetNamespaceOptions(); nsOptions != nil { + checkpoint.Data.HostNetwork = nsOptions.HostNetwork + } return checkpoint } diff --git a/pkg/kubelet/dockershim/docker_service.go b/pkg/kubelet/dockershim/docker_service.go index ac1f80ddf71..05b73394aa5 100644 --- a/pkg/kubelet/dockershim/docker_service.go +++ b/pkg/kubelet/dockershim/docker_service.go @@ -25,7 +25,7 @@ import ( "time" "github.com/blang/semver" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" "github.com/golang/glog" "k8s.io/api/core/v1" @@ -455,10 +455,13 @@ func (ds *dockerService) getDockerVersionFromCache() (*dockertypes.Version, erro // We only store on key in the cache. 
const dummyKey = "version" value, err := ds.versionCache.Get(dummyKey) - dv := value.(*dockertypes.Version) if err != nil { return nil, err } + dv, ok := value.(*dockertypes.Version) + if !ok { + return nil, fmt.Errorf("Converted to *dockertype.Version error") + } return dv, nil } diff --git a/pkg/kubelet/dockershim/docker_service_test.go b/pkg/kubelet/dockershim/docker_service_test.go index 35cd5b76a93..04bf5291bfb 100644 --- a/pkg/kubelet/dockershim/docker_service_test.go +++ b/pkg/kubelet/dockershim/docker_service_test.go @@ -22,7 +22,7 @@ import ( "time" "github.com/blang/semver" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/pkg/kubelet/dockershim/docker_streaming.go b/pkg/kubelet/dockershim/docker_streaming.go index 70f9d105619..cf599d8a9fe 100644 --- a/pkg/kubelet/dockershim/docker_streaming.go +++ b/pkg/kubelet/dockershim/docker_streaming.go @@ -25,7 +25,7 @@ import ( "strings" "time" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" "github.com/golang/glog" @@ -121,7 +121,7 @@ func (ds *dockerService) PortForward(req *runtimeapi.PortForwardRequest) (*runti if err != nil { return nil, err } - // TODO(timstclair): Verify that ports are exposed. + // TODO(tallclair): Verify that ports are exposed. return ds.streamingServer.GetPortForward(req) } @@ -140,7 +140,7 @@ func attachContainer(client libdocker.Interface, containerID string, stdin io.Re // Have to start this before the call to client.AttachToContainer because client.AttachToContainer is a blocking // call :-( Otherwise, resize events don't get processed and the terminal never resizes. 
kubecontainer.HandleResizing(resize, func(size remotecommand.TerminalSize) { - client.ResizeContainerTTY(containerID, int(size.Height), int(size.Width)) + client.ResizeContainerTTY(containerID, uint(size.Height), uint(size.Width)) }) // TODO(random-liu): Do we really use the *Logs* field here? diff --git a/pkg/kubelet/dockershim/exec.go b/pkg/kubelet/dockershim/exec.go index 1d73a8a8d5c..881afea6c34 100644 --- a/pkg/kubelet/dockershim/exec.go +++ b/pkg/kubelet/dockershim/exec.go @@ -23,7 +23,7 @@ import ( "os/exec" "time" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" "github.com/golang/glog" "k8s.io/client-go/tools/remotecommand" @@ -150,7 +150,7 @@ func (*NativeExecHandler) ExecInContainer(client libdocker.Interface, container // Have to start this before the call to client.StartExec because client.StartExec is a blocking // call :-( Otherwise, resize events don't get processed and the terminal never resizes. kubecontainer.HandleResizing(resize, func(size remotecommand.TerminalSize) { - client.ResizeExecTTY(execObj.ID, int(size.Height), int(size.Width)) + client.ResizeExecTTY(execObj.ID, uint(size.Height), uint(size.Width)) }) startOpts := dockertypes.ExecStartCheck{Detach: false, Tty: tty} diff --git a/pkg/kubelet/dockershim/helpers.go b/pkg/kubelet/dockershim/helpers.go index 22a976ed157..c82da109280 100644 --- a/pkg/kubelet/dockershim/helpers.go +++ b/pkg/kubelet/dockershim/helpers.go @@ -25,8 +25,9 @@ import ( "strings" "github.com/blang/semver" - dockertypes "github.com/docker/engine-api/types" - dockerfilters "github.com/docker/engine-api/types/filters" + dockertypes "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + dockerfilters "github.com/docker/docker/api/types/filters" dockernat "github.com/docker/go-connections/nat" "github.com/golang/glog" @@ -152,8 +153,8 @@ func generateMountBindings(mounts []*runtimeapi.Mount) []string { return result } 
-func makePortsAndBindings(pm []*runtimeapi.PortMapping) (map[dockernat.Port]struct{}, map[dockernat.Port][]dockernat.PortBinding) { - exposedPorts := map[dockernat.Port]struct{}{} +func makePortsAndBindings(pm []*runtimeapi.PortMapping) (dockernat.PortSet, map[dockernat.Port][]dockernat.PortBinding) { + exposedPorts := dockernat.PortSet{} portBindings := map[dockernat.Port][]dockernat.PortBinding{} for _, port := range pm { exteriorPort := port.HostPort @@ -280,7 +281,7 @@ func getUserFromImageUser(imageUser string) (*int64, string) { // In that case we have to create the container with a randomized name. // TODO(random-liu): Remove this work around after docker 1.11 is deprecated. // TODO(#33189): Monitor the tests to see if the fix is sufficient. -func recoverFromCreationConflictIfNeeded(client libdocker.Interface, createConfig dockertypes.ContainerCreateConfig, err error) (*dockertypes.ContainerCreateResponse, error) { +func recoverFromCreationConflictIfNeeded(client libdocker.Interface, createConfig dockertypes.ContainerCreateConfig, err error) (*dockercontainer.ContainerCreateCreatedBody, error) { matches := conflictRE.FindStringSubmatch(err.Error()) if len(matches) != 2 { return nil, err diff --git a/pkg/kubelet/dockershim/helpers_linux.go b/pkg/kubelet/dockershim/helpers_linux.go index fd4f87fe4dc..6a347a0e7ea 100644 --- a/pkg/kubelet/dockershim/helpers_linux.go +++ b/pkg/kubelet/dockershim/helpers_linux.go @@ -28,8 +28,8 @@ import ( "strings" "github.com/blang/semver" - dockertypes "github.com/docker/engine-api/types" - dockercontainer "github.com/docker/engine-api/types/container" + dockertypes "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" "k8s.io/api/core/v1" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) diff --git a/pkg/kubelet/dockershim/helpers_test.go b/pkg/kubelet/dockershim/helpers_test.go index 4e0615834e5..935cda889c9 100644 --- 
a/pkg/kubelet/dockershim/helpers_test.go +++ b/pkg/kubelet/dockershim/helpers_test.go @@ -25,7 +25,7 @@ import ( "testing" "github.com/blang/semver" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" dockernat "github.com/docker/go-connections/nat" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -218,7 +218,7 @@ func TestEnsureSandboxImageExists(t *testing.T) { t.Logf("TestCase: %q", desc) _, fakeDocker, _ := newTestDockerService() if test.injectImage { - images := []dockertypes.Image{{ID: sandboxImage}} + images := []dockertypes.ImageSummary{{ID: sandboxImage}} fakeDocker.InjectImages(images) if test.imgNeedsAuth { fakeDocker.MakeImagesPrivate(images, authConfig) @@ -243,7 +243,7 @@ func TestEnsureSandboxImageExists(t *testing.T) { func TestMakePortsAndBindings(t *testing.T) { for desc, test := range map[string]struct { pm []*runtimeapi.PortMapping - exposedPorts map[dockernat.Port]struct{} + exposedPorts dockernat.PortSet portmappings map[dockernat.Port][]dockernat.PortBinding }{ "no port mapping": { diff --git a/pkg/kubelet/dockershim/helpers_unsupported.go b/pkg/kubelet/dockershim/helpers_unsupported.go index ab8d380adce..1589888147d 100644 --- a/pkg/kubelet/dockershim/helpers_unsupported.go +++ b/pkg/kubelet/dockershim/helpers_unsupported.go @@ -20,7 +20,7 @@ package dockershim import ( "github.com/blang/semver" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" "github.com/golang/glog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) diff --git a/pkg/kubelet/dockershim/helpers_windows.go b/pkg/kubelet/dockershim/helpers_windows.go index 917e2dc4a2d..f7735e4320a 100644 --- a/pkg/kubelet/dockershim/helpers_windows.go +++ b/pkg/kubelet/dockershim/helpers_windows.go @@ -22,9 +22,9 @@ import ( "os" "github.com/blang/semver" - dockertypes "github.com/docker/engine-api/types" - dockercontainer 
"github.com/docker/engine-api/types/container" - dockerfilters "github.com/docker/engine-api/types/filters" + dockertypes "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + dockerfilters "github.com/docker/docker/api/types/filters" "github.com/golang/glog" "k8s.io/api/core/v1" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" @@ -64,11 +64,11 @@ func (ds *dockerService) updateCreateConfig( func (ds *dockerService) determinePodIPBySandboxID(sandboxID string) string { opts := dockertypes.ContainerListOptions{ - All: true, - Filter: dockerfilters.NewArgs(), + All: true, + Filters: dockerfilters.NewArgs(), } - f := newDockerFilter(&opts.Filter) + f := newDockerFilter(&opts.Filters) f.AddLabel(containerTypeLabelKey, containerTypeLabelContainer) f.AddLabel(sandboxIDLabelKey, sandboxID) containers, err := ds.client.ListContainers(opts) diff --git a/pkg/kubelet/dockershim/libdocker/BUILD b/pkg/kubelet/dockershim/libdocker/BUILD index 110afd6e8dd..fa284b9ed0b 100644 --- a/pkg/kubelet/dockershim/libdocker/BUILD +++ b/pkg/kubelet/dockershim/libdocker/BUILD @@ -19,7 +19,7 @@ go_test( tags = ["automanaged"], deps = [ "//pkg/util/hash:go_default_library", - "//vendor/github.com/docker/engine-api/types:go_default_library", + "//vendor/github.com/docker/docker/api/types:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", @@ -42,11 +42,11 @@ go_library( "//pkg/kubelet/metrics:go_default_library", "//vendor/github.com/docker/distribution/digest:go_default_library", "//vendor/github.com/docker/distribution/reference:go_default_library", + "//vendor/github.com/docker/docker/api/types:go_default_library", + "//vendor/github.com/docker/docker/api/types/container:go_default_library", + "//vendor/github.com/docker/docker/client:go_default_library", 
"//vendor/github.com/docker/docker/pkg/jsonmessage:go_default_library", "//vendor/github.com/docker/docker/pkg/stdcopy:go_default_library", - "//vendor/github.com/docker/engine-api/client:go_default_library", - "//vendor/github.com/docker/engine-api/types:go_default_library", - "//vendor/github.com/docker/engine-api/types/container:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/kubelet/dockershim/libdocker/client.go b/pkg/kubelet/dockershim/libdocker/client.go index cc71a68247d..0307a82dc53 100644 --- a/pkg/kubelet/dockershim/libdocker/client.go +++ b/pkg/kubelet/dockershim/libdocker/client.go @@ -20,8 +20,9 @@ import ( "strings" "time" - dockerapi "github.com/docker/engine-api/client" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + dockerapi "github.com/docker/docker/client" "github.com/golang/glog" ) @@ -44,25 +45,25 @@ const ( type Interface interface { ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) InspectContainer(id string) (*dockertypes.ContainerJSON, error) - CreateContainer(dockertypes.ContainerCreateConfig) (*dockertypes.ContainerCreateResponse, error) + CreateContainer(dockertypes.ContainerCreateConfig) (*dockercontainer.ContainerCreateCreatedBody, error) StartContainer(id string) error - StopContainer(id string, timeout int) error + StopContainer(id string, timeout time.Duration) error RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error InspectImageByRef(imageRef string) (*dockertypes.ImageInspect, error) InspectImageByID(imageID string) (*dockertypes.ImageInspect, error) - ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.Image, error) + ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.ImageSummary, 
error) PullImage(image string, auth dockertypes.AuthConfig, opts dockertypes.ImagePullOptions) error RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDelete, error) ImageHistory(id string) ([]dockertypes.ImageHistory, error) Logs(string, dockertypes.ContainerLogsOptions, StreamOptions) error Version() (*dockertypes.Version, error) Info() (*dockertypes.Info, error) - CreateExec(string, dockertypes.ExecConfig) (*dockertypes.ContainerExecCreateResponse, error) + CreateExec(string, dockertypes.ExecConfig) (*dockertypes.IDResponse, error) StartExec(string, dockertypes.ExecStartCheck, StreamOptions) error InspectExec(id string) (*dockertypes.ContainerExecInspect, error) AttachToContainer(string, dockertypes.ContainerAttachOptions, StreamOptions) error - ResizeContainerTTY(id string, height, width int) error - ResizeExecTTY(id string, height, width int) error + ResizeContainerTTY(id string, height, width uint) error + ResizeExecTTY(id string, height, width uint) error } // Get a *dockerapi.Client, either using the endpoint passed in, or using diff --git a/pkg/kubelet/dockershim/libdocker/fake_client.go b/pkg/kubelet/dockershim/libdocker/fake_client.go index 9d7f37c3332..341dcd484c4 100644 --- a/pkg/kubelet/dockershim/libdocker/fake_client.go +++ b/pkg/kubelet/dockershim/libdocker/fake_client.go @@ -29,8 +29,8 @@ import ( "sync" "time" - dockertypes "github.com/docker/engine-api/types" - dockercontainer "github.com/docker/engine-api/types/container" + dockertypes "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/clock" @@ -54,7 +54,7 @@ type FakeDockerClient struct { ExitedContainerList []dockertypes.Container ContainerMap map[string]*dockertypes.ContainerJSON ImageInspects map[string]*dockertypes.ImageInspect - Images []dockertypes.Image + Images []dockertypes.ImageSummary ImageIDsNeedingAuth map[string]dockertypes.AuthConfig Errors 
map[string]error called []calledDetail @@ -391,8 +391,8 @@ func (f *FakeDockerClient) ListContainers(options dockertypes.ContainerListOptio // TODO(random-liu): Is a fully sorted array needed? containerList = append(containerList, f.ExitedContainerList...) } - // Filter containers with id, only support 1 id. - idFilters := options.Filter.Get("id") + // Filters containers with id, only support 1 id. + idFilters := options.Filters.Get("id") if len(idFilters) != 0 { var filtered []dockertypes.Container for _, container := range containerList { @@ -405,8 +405,8 @@ func (f *FakeDockerClient) ListContainers(options dockertypes.ContainerListOptio } containerList = filtered } - // Filter containers with status, only support 1 status. - statusFilters := options.Filter.Get("status") + // Filters containers with status, only support 1 status. + statusFilters := options.Filters.Get("status") if len(statusFilters) == 1 { var filtered []dockertypes.Container for _, container := range containerList { @@ -419,8 +419,8 @@ func (f *FakeDockerClient) ListContainers(options dockertypes.ContainerListOptio } containerList = filtered } - // Filter containers with label filter. - labelFilters := options.Filter.Get("label") + // Filters containers with label filter. + labelFilters := options.Filters.Get("label") if len(labelFilters) != 0 { var filtered []dockertypes.Container for _, container := range containerList { @@ -514,7 +514,7 @@ func GetFakeContainerID(name string) string { // CreateContainer is a test-spy implementation of Interface.CreateContainer. // It adds an entry "create" to the internal method call record. 
-func (f *FakeDockerClient) CreateContainer(c dockertypes.ContainerCreateConfig) (*dockertypes.ContainerCreateResponse, error) { +func (f *FakeDockerClient) CreateContainer(c dockertypes.ContainerCreateConfig) (*dockercontainer.ContainerCreateCreatedBody, error) { f.Lock() defer f.Unlock() f.appendCalled(calledDetail{name: "create"}) @@ -536,7 +536,7 @@ func (f *FakeDockerClient) CreateContainer(c dockertypes.ContainerCreateConfig) f.normalSleep(100, 25, 25) - return &dockertypes.ContainerCreateResponse{ID: id}, nil + return &dockercontainer.ContainerCreateCreatedBody{ID: id}, nil } // StartContainer is a test-spy implementation of Interface.StartContainer. @@ -566,7 +566,7 @@ func (f *FakeDockerClient) StartContainer(id string) error { // StopContainer is a test-spy implementation of Interface.StopContainer. // It adds an entry "stop" to the internal method call record. -func (f *FakeDockerClient) StopContainer(id string, timeout int) error { +func (f *FakeDockerClient) StopContainer(id string, timeout time.Duration) error { f.Lock() defer f.Unlock() f.appendCalled(calledDetail{name: "stop"}) @@ -675,12 +675,12 @@ func (f *FakeDockerClient) Info() (*dockertypes.Info, error) { return &f.Information, nil } -func (f *FakeDockerClient) CreateExec(id string, opts dockertypes.ExecConfig) (*dockertypes.ContainerExecCreateResponse, error) { +func (f *FakeDockerClient) CreateExec(id string, opts dockertypes.ExecConfig) (*dockertypes.IDResponse, error) { f.Lock() defer f.Unlock() f.execCmd = opts.Cmd f.appendCalled(calledDetail{name: "create_exec"}) - return &dockertypes.ContainerExecCreateResponse{ID: "12345678"}, nil + return &dockertypes.IDResponse{ID: "12345678"}, nil } func (f *FakeDockerClient) StartExec(startExec string, opts dockertypes.ExecStartCheck, sopts StreamOptions) error { @@ -701,7 +701,7 @@ func (f *FakeDockerClient) InspectExec(id string) (*dockertypes.ContainerExecIns return f.ExecInspect, f.popError("inspect_exec") } -func (f *FakeDockerClient) 
ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.Image, error) { +func (f *FakeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.ImageSummary, error) { f.Lock() defer f.Unlock() f.appendCalled(calledDetail{name: "list_images"}) @@ -725,7 +725,7 @@ func (f *FakeDockerClient) RemoveImage(image string, opts dockertypes.ImageRemov return []dockertypes.ImageDelete{{Deleted: image}}, err } -func (f *FakeDockerClient) InjectImages(images []dockertypes.Image) { +func (f *FakeDockerClient) InjectImages(images []dockertypes.ImageSummary) { f.Lock() defer f.Unlock() f.Images = append(f.Images, images...) @@ -734,7 +734,7 @@ func (f *FakeDockerClient) InjectImages(images []dockertypes.Image) { } } -func (f *FakeDockerClient) MakeImagesPrivate(images []dockertypes.Image, auth dockertypes.AuthConfig) { +func (f *FakeDockerClient) MakeImagesPrivate(images []dockertypes.ImageSummary, auth dockertypes.AuthConfig) { f.Lock() defer f.Unlock() for _, i := range images { @@ -745,7 +745,7 @@ func (f *FakeDockerClient) MakeImagesPrivate(images []dockertypes.Image, auth do func (f *FakeDockerClient) ResetImages() { f.Lock() defer f.Unlock() - f.Images = []dockertypes.Image{} + f.Images = []dockertypes.ImageSummary{} f.ImageInspects = make(map[string]*dockertypes.ImageInspect) f.ImageIDsNeedingAuth = make(map[string]dockertypes.AuthConfig) } @@ -767,14 +767,14 @@ func (f *FakeDockerClient) updateContainerStatus(id, status string) { } } -func (f *FakeDockerClient) ResizeExecTTY(id string, height, width int) error { +func (f *FakeDockerClient) ResizeExecTTY(id string, height, width uint) error { f.Lock() defer f.Unlock() f.appendCalled(calledDetail{name: "resize_exec"}) return nil } -func (f *FakeDockerClient) ResizeContainerTTY(id string, height, width int) error { +func (f *FakeDockerClient) ResizeContainerTTY(id string, height, width uint) error { f.Lock() defer f.Unlock() f.appendCalled(calledDetail{name: "resize_container"}) @@ -792,7 +792,7 @@ func 
createImageInspectFromRef(ref string) *dockertypes.ImageInspect { } } -func createImageInspectFromImage(image dockertypes.Image) *dockertypes.ImageInspect { +func createImageInspectFromImage(image dockertypes.ImageSummary) *dockertypes.ImageInspect { return &dockertypes.ImageInspect{ ID: image.ID, RepoTags: image.RepoTags, @@ -803,8 +803,8 @@ func createImageInspectFromImage(image dockertypes.Image) *dockertypes.ImageInsp } } -func createImageFromImageInspect(inspect dockertypes.ImageInspect) *dockertypes.Image { - return &dockertypes.Image{ +func createImageFromImageInspect(inspect dockertypes.ImageInspect) *dockertypes.ImageSummary { + return &dockertypes.ImageSummary{ ID: inspect.ID, RepoTags: inspect.RepoTags, // Image size is required to be non-zero for CRI integration. diff --git a/pkg/kubelet/dockershim/libdocker/helpers.go b/pkg/kubelet/dockershim/libdocker/helpers.go index 5957ff5e2ea..88812028bf0 100644 --- a/pkg/kubelet/dockershim/libdocker/helpers.go +++ b/pkg/kubelet/dockershim/libdocker/helpers.go @@ -22,7 +22,7 @@ import ( dockerdigest "github.com/docker/distribution/digest" dockerref "github.com/docker/distribution/reference" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" "github.com/golang/glog" ) diff --git a/pkg/kubelet/dockershim/libdocker/helpers_test.go b/pkg/kubelet/dockershim/libdocker/helpers_test.go index 430db01f9f1..03e0046b9fc 100644 --- a/pkg/kubelet/dockershim/libdocker/helpers_test.go +++ b/pkg/kubelet/dockershim/libdocker/helpers_test.go @@ -20,7 +20,7 @@ import ( "fmt" "testing" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" "github.com/stretchr/testify/assert" ) diff --git a/pkg/kubelet/dockershim/libdocker/instrumented_client.go b/pkg/kubelet/dockershim/libdocker/instrumented_client.go index 9c2b78d4f10..13d4a3e6493 100644 --- a/pkg/kubelet/dockershim/libdocker/instrumented_client.go +++ 
b/pkg/kubelet/dockershim/libdocker/instrumented_client.go @@ -19,7 +19,8 @@ package libdocker import ( "time" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" "k8s.io/kubernetes/pkg/kubelet/metrics" ) @@ -71,7 +72,7 @@ func (in instrumentedInterface) InspectContainer(id string) (*dockertypes.Contai return out, err } -func (in instrumentedInterface) CreateContainer(opts dockertypes.ContainerCreateConfig) (*dockertypes.ContainerCreateResponse, error) { +func (in instrumentedInterface) CreateContainer(opts dockertypes.ContainerCreateConfig) (*dockercontainer.ContainerCreateCreatedBody, error) { const operation = "create_container" defer recordOperation(operation, time.Now()) @@ -89,7 +90,7 @@ func (in instrumentedInterface) StartContainer(id string) error { return err } -func (in instrumentedInterface) StopContainer(id string, timeout int) error { +func (in instrumentedInterface) StopContainer(id string, timeout time.Duration) error { const operation = "stop_container" defer recordOperation(operation, time.Now()) @@ -125,7 +126,7 @@ func (in instrumentedInterface) InspectImageByID(image string) (*dockertypes.Ima return out, err } -func (in instrumentedInterface) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.Image, error) { +func (in instrumentedInterface) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.ImageSummary, error) { const operation = "list_images" defer recordOperation(operation, time.Now()) @@ -178,7 +179,7 @@ func (in instrumentedInterface) Info() (*dockertypes.Info, error) { return out, err } -func (in instrumentedInterface) CreateExec(id string, opts dockertypes.ExecConfig) (*dockertypes.ContainerExecCreateResponse, error) { +func (in instrumentedInterface) CreateExec(id string, opts dockertypes.ExecConfig) (*dockertypes.IDResponse, error) { const operation = "create_exec" defer recordOperation(operation, 
time.Now()) @@ -223,7 +224,7 @@ func (in instrumentedInterface) ImageHistory(id string) ([]dockertypes.ImageHist return out, err } -func (in instrumentedInterface) ResizeExecTTY(id string, height, width int) error { +func (in instrumentedInterface) ResizeExecTTY(id string, height, width uint) error { const operation = "resize_exec" defer recordOperation(operation, time.Now()) @@ -232,7 +233,7 @@ func (in instrumentedInterface) ResizeExecTTY(id string, height, width int) erro return err } -func (in instrumentedInterface) ResizeContainerTTY(id string, height, width int) error { +func (in instrumentedInterface) ResizeContainerTTY(id string, height, width uint) error { const operation = "resize_container" defer recordOperation(operation, time.Now()) diff --git a/pkg/kubelet/dockershim/libdocker/kube_docker_client.go b/pkg/kubelet/dockershim/libdocker/kube_docker_client.go index f3bbc3f0536..b74a5d04355 100644 --- a/pkg/kubelet/dockershim/libdocker/kube_docker_client.go +++ b/pkg/kubelet/dockershim/libdocker/kube_docker_client.go @@ -29,26 +29,17 @@ import ( "github.com/golang/glog" + dockertypes "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + dockerapi "github.com/docker/docker/client" dockermessage "github.com/docker/docker/pkg/jsonmessage" dockerstdcopy "github.com/docker/docker/pkg/stdcopy" - dockerapi "github.com/docker/engine-api/client" - dockertypes "github.com/docker/engine-api/types" "golang.org/x/net/context" ) // kubeDockerClient is a wrapped layer of docker client for kubelet internal use. This layer is added to: // 1) Redirect stream for exec and attach operations. // 2) Wrap the context in this layer to make the Interface cleaner. -// 3) Stabilize the Interface. The engine-api is still under active development, the interface -// is not stabilized yet. However, the Interface is used in many files in Kubernetes, we may -// not want to change the interface frequently. 
With this layer, we can port the engine api to the -// Interface to avoid changing Interface as much as possible. -// (See -// * https://github.com/docker/engine-api/issues/89 -// * https://github.com/docker/engine-api/issues/137 -// * https://github.com/docker/engine-api/pull/140) -// TODO(random-liu): Swith to new docker interface by refactoring the functions in the old Interface -// one by one. type kubeDockerClient struct { // timeout is the timeout of short running docker operations. timeout time.Duration @@ -131,7 +122,7 @@ func (d *kubeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJS return &containerJSON, nil } -func (d *kubeDockerClient) CreateContainer(opts dockertypes.ContainerCreateConfig) (*dockertypes.ContainerCreateResponse, error) { +func (d *kubeDockerClient) CreateContainer(opts dockertypes.ContainerCreateConfig) (*dockercontainer.ContainerCreateCreatedBody, error) { ctx, cancel := d.getTimeoutContext() defer cancel() // we provide an explicit default shm size as to not depend on docker daemon. @@ -152,18 +143,18 @@ func (d *kubeDockerClient) CreateContainer(opts dockertypes.ContainerCreateConfi func (d *kubeDockerClient) StartContainer(id string) error { ctx, cancel := d.getTimeoutContext() defer cancel() - err := d.client.ContainerStart(ctx, id) + err := d.client.ContainerStart(ctx, id, dockertypes.ContainerStartOptions{}) if ctxErr := contextError(ctx); ctxErr != nil { return ctxErr } return err } -// Stopping an already stopped container will not cause an error in engine-v1. -func (d *kubeDockerClient) StopContainer(id string, timeout int) error { - ctx, cancel := d.getCustomTimeoutContext(time.Duration(timeout) * time.Second) +// Stopping an already stopped container will not cause an error in dockerapi. 
+func (d *kubeDockerClient) StopContainer(id string, timeout time.Duration) error { + ctx, cancel := d.getCustomTimeoutContext(timeout) defer cancel() - err := d.client.ContainerStop(ctx, id, timeout) + err := d.client.ContainerStop(ctx, id, &timeout) if ctxErr := contextError(ctx); ctxErr != nil { return ctxErr } @@ -183,7 +174,7 @@ func (d *kubeDockerClient) RemoveContainer(id string, opts dockertypes.Container func (d *kubeDockerClient) inspectImageRaw(ref string) (*dockertypes.ImageInspect, error) { ctx, cancel := d.getTimeoutContext() defer cancel() - resp, _, err := d.client.ImageInspectWithRaw(ctx, ref, true) + resp, _, err := d.client.ImageInspectWithRaw(ctx, ref) if ctxErr := contextError(ctx); ctxErr != nil { return nil, ctxErr } @@ -231,7 +222,7 @@ func (d *kubeDockerClient) ImageHistory(id string) ([]dockertypes.ImageHistory, return resp, err } -func (d *kubeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.Image, error) { +func (d *kubeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.ImageSummary, error) { ctx, cancel := d.getTimeoutContext() defer cancel() images, err := d.client.ImageList(ctx, opts) @@ -429,7 +420,7 @@ func (d *kubeDockerClient) Info() (*dockertypes.Info, error) { } // TODO(random-liu): Add unit test for exec and attach functions, just like what go-dockerclient did. 
-func (d *kubeDockerClient) CreateExec(id string, opts dockertypes.ExecConfig) (*dockertypes.ContainerExecCreateResponse, error) { +func (d *kubeDockerClient) CreateExec(id string, opts dockertypes.ExecConfig) (*dockertypes.IDResponse, error) { ctx, cancel := d.getTimeoutContext() defer cancel() resp, err := d.client.ContainerExecCreate(ctx, id, opts) @@ -493,7 +484,7 @@ func (d *kubeDockerClient) AttachToContainer(id string, opts dockertypes.Contain return d.holdHijackedConnection(sopts.RawTerminal, sopts.InputStream, sopts.OutputStream, sopts.ErrorStream, resp) } -func (d *kubeDockerClient) ResizeExecTTY(id string, height, width int) error { +func (d *kubeDockerClient) ResizeExecTTY(id string, height, width uint) error { ctx, cancel := d.getCancelableContext() defer cancel() return d.client.ContainerExecResize(ctx, id, dockertypes.ResizeOptions{ @@ -502,7 +493,7 @@ func (d *kubeDockerClient) ResizeExecTTY(id string, height, width int) error { }) } -func (d *kubeDockerClient) ResizeContainerTTY(id string, height, width int) error { +func (d *kubeDockerClient) ResizeContainerTTY(id string, height, width uint) error { ctx, cancel := d.getCancelableContext() defer cancel() return d.client.ContainerResize(ctx, id, dockertypes.ResizeOptions{ diff --git a/pkg/kubelet/dockershim/libdocker/legacy_test.go b/pkg/kubelet/dockershim/libdocker/legacy_test.go index 651b11e4d0b..0a9cc007932 100644 --- a/pkg/kubelet/dockershim/libdocker/legacy_test.go +++ b/pkg/kubelet/dockershim/libdocker/legacy_test.go @@ -21,7 +21,7 @@ import ( "hash/adler32" "testing" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" "github.com/stretchr/testify/assert" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" diff --git a/pkg/kubelet/dockershim/security_context.go b/pkg/kubelet/dockershim/security_context.go index 71edd77f216..95a730dc03b 100644 --- a/pkg/kubelet/dockershim/security_context.go +++ b/pkg/kubelet/dockershim/security_context.go @@ 
-22,11 +22,9 @@ import ( "strings" "github.com/blang/semver" - dockercontainer "github.com/docker/engine-api/types/container" + dockercontainer "github.com/docker/docker/api/types/container" - "k8s.io/api/core/v1" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" - "k8s.io/kubernetes/pkg/kubelet/dockershim/securitycontext" knetwork "k8s.io/kubernetes/pkg/kubelet/network" ) @@ -101,14 +99,9 @@ func modifyHostConfig(sc *runtimeapi.LinuxContainerSecurityContext, hostConfig * hostConfig.CapDrop = sc.GetCapabilities().DropCapabilities } if sc.SelinuxOptions != nil { - hostConfig.SecurityOpt = securitycontext.ModifySecurityOptions( + hostConfig.SecurityOpt = addSELinuxOptions( hostConfig.SecurityOpt, - &v1.SELinuxOptions{ - User: sc.SelinuxOptions.User, - Role: sc.SelinuxOptions.Role, - Type: sc.SelinuxOptions.Type, - Level: sc.SelinuxOptions.Level, - }, + sc.SelinuxOptions, separator, ) } diff --git a/pkg/kubelet/dockershim/security_context_test.go b/pkg/kubelet/dockershim/security_context_test.go index 868beed8af5..3a252f0d33d 100644 --- a/pkg/kubelet/dockershim/security_context_test.go +++ b/pkg/kubelet/dockershim/security_context_test.go @@ -22,11 +22,10 @@ import ( "testing" "github.com/blang/semver" - dockercontainer "github.com/docker/engine-api/types/container" + dockercontainer "github.com/docker/docker/api/types/container" "github.com/stretchr/testify/assert" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" - "k8s.io/kubernetes/pkg/kubelet/dockershim/securitycontext" ) func TestModifyContainerConfig(t *testing.T) { @@ -83,10 +82,10 @@ func TestModifyHostConfig(t *testing.T) { } setSELinuxHC := &dockercontainer.HostConfig{ SecurityOpt: []string{ - fmt.Sprintf("%s:%s", securitycontext.DockerLabelUser('='), "user"), - fmt.Sprintf("%s:%s", securitycontext.DockerLabelRole('='), "role"), - fmt.Sprintf("%s:%s", securitycontext.DockerLabelType('='), "type"), - fmt.Sprintf("%s:%s", securitycontext.DockerLabelLevel('='), "level"), + 
fmt.Sprintf("%s:%s", selinuxLabelUser('='), "user"), + fmt.Sprintf("%s:%s", selinuxLabelRole('='), "role"), + fmt.Sprintf("%s:%s", selinuxLabelType('='), "type"), + fmt.Sprintf("%s:%s", selinuxLabelLevel('='), "level"), }, } @@ -184,10 +183,10 @@ func TestModifyHostConfigAndNamespaceOptionsForContainer(t *testing.T) { } setSELinuxHC := &dockercontainer.HostConfig{ SecurityOpt: []string{ - fmt.Sprintf("%s:%s", securitycontext.DockerLabelUser('='), "user"), - fmt.Sprintf("%s:%s", securitycontext.DockerLabelRole('='), "role"), - fmt.Sprintf("%s:%s", securitycontext.DockerLabelType('='), "type"), - fmt.Sprintf("%s:%s", securitycontext.DockerLabelLevel('='), "level"), + fmt.Sprintf("%s:%s", selinuxLabelUser('='), "user"), + fmt.Sprintf("%s:%s", selinuxLabelRole('='), "role"), + fmt.Sprintf("%s:%s", selinuxLabelType('='), "type"), + fmt.Sprintf("%s:%s", selinuxLabelLevel('='), "level"), }, IpcMode: dockercontainer.IpcMode(sandboxNSMode), NetworkMode: dockercontainer.NetworkMode(sandboxNSMode), @@ -415,10 +414,10 @@ func fullValidHostConfig() *dockercontainer.HostConfig { CapAdd: []string{"addCapA", "addCapB"}, CapDrop: []string{"dropCapA", "dropCapB"}, SecurityOpt: []string{ - fmt.Sprintf("%s:%s", securitycontext.DockerLabelUser('='), "user"), - fmt.Sprintf("%s:%s", securitycontext.DockerLabelRole('='), "role"), - fmt.Sprintf("%s:%s", securitycontext.DockerLabelType('='), "type"), - fmt.Sprintf("%s:%s", securitycontext.DockerLabelLevel('='), "level"), + fmt.Sprintf("%s:%s", selinuxLabelUser('='), "user"), + fmt.Sprintf("%s:%s", selinuxLabelRole('='), "role"), + fmt.Sprintf("%s:%s", selinuxLabelType('='), "type"), + fmt.Sprintf("%s:%s", selinuxLabelLevel('='), "level"), }, } } diff --git a/pkg/kubelet/dockershim/securitycontext/BUILD b/pkg/kubelet/dockershim/securitycontext/BUILD deleted file mode 100644 index a4da55bbf98..00000000000 --- a/pkg/kubelet/dockershim/securitycontext/BUILD +++ /dev/null @@ -1,53 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - 
-licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fake.go", - "provider.go", - "types.go", - "util.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/kubelet/container:go_default_library", - "//pkg/kubelet/leaky:go_default_library", - "//pkg/securitycontext:go_default_library", - "//vendor/github.com/docker/engine-api/types/container:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["provider_test.go"], - library = ":go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api/testing:go_default_library", - "//vendor/github.com/docker/engine-api/types/container:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/pkg/kubelet/dockershim/securitycontext/fake.go b/pkg/kubelet/dockershim/securitycontext/fake.go deleted file mode 100644 index 3217dcff1fe..00000000000 --- a/pkg/kubelet/dockershim/securitycontext/fake.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package securitycontext - -import ( - "k8s.io/api/core/v1" - - dockercontainer "github.com/docker/engine-api/types/container" -) - -// NewFakeSecurityContextProvider creates a new, no-op security context provider. -func NewFakeSecurityContextProvider() SecurityContextProvider { - return FakeSecurityContextProvider{} -} - -type FakeSecurityContextProvider struct{} - -func (p FakeSecurityContextProvider) ModifyContainerConfig(pod *v1.Pod, container *v1.Container, config *dockercontainer.Config) { -} -func (p FakeSecurityContextProvider) ModifyHostConfig(pod *v1.Pod, container *v1.Container, hostConfig *dockercontainer.HostConfig, supplementalGids []int64) { -} diff --git a/pkg/kubelet/dockershim/securitycontext/provider.go b/pkg/kubelet/dockershim/securitycontext/provider.go deleted file mode 100644 index 0899b0c8a65..00000000000 --- a/pkg/kubelet/dockershim/securitycontext/provider.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package securitycontext - -import ( - "fmt" - "strconv" - - "k8s.io/api/core/v1" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/leaky" - "k8s.io/kubernetes/pkg/securitycontext" - - dockercontainer "github.com/docker/engine-api/types/container" -) - -// NewSimpleSecurityContextProvider creates a new SimpleSecurityContextProvider. 
-func NewSimpleSecurityContextProvider(securityOptSeparator rune) SecurityContextProvider { - return SimpleSecurityContextProvider{securityOptSeparator} -} - -// SimpleSecurityContextProvider is the default implementation of a SecurityContextProvider. -type SimpleSecurityContextProvider struct { - securityOptSeparator rune -} - -// ModifyContainerConfig is called before the Docker createContainer call. -// The security context provider can make changes to the Config with which -// the container is created. -func (p SimpleSecurityContextProvider) ModifyContainerConfig(pod *v1.Pod, container *v1.Container, config *dockercontainer.Config) { - effectiveSC := securitycontext.DetermineEffectiveSecurityContext(pod, container) - if effectiveSC == nil { - return - } - if effectiveSC.RunAsUser != nil { - config.User = strconv.Itoa(int(*effectiveSC.RunAsUser)) - } -} - -// ModifyHostConfig is called before the Docker runContainer call. The -// security context provider can make changes to the HostConfig, affecting -// security options, whether the container is privileged, volume binds, etc. -func (p SimpleSecurityContextProvider) ModifyHostConfig(pod *v1.Pod, container *v1.Container, hostConfig *dockercontainer.HostConfig, supplementalGids []int64) { - // Apply supplemental groups - if container.Name != leaky.PodInfraContainerName { - // TODO: We skip application of supplemental groups to the - // infra container to work around a runc issue which - // requires containers to have the '/etc/group'. For - // more information see: - // https://github.com/opencontainers/runc/pull/313 - // This can be removed once the fix makes it into the - // required version of docker. 
- if pod.Spec.SecurityContext != nil { - for _, group := range pod.Spec.SecurityContext.SupplementalGroups { - hostConfig.GroupAdd = append(hostConfig.GroupAdd, strconv.Itoa(int(group))) - } - if pod.Spec.SecurityContext.FSGroup != nil { - hostConfig.GroupAdd = append(hostConfig.GroupAdd, strconv.Itoa(int(*pod.Spec.SecurityContext.FSGroup))) - } - } - - for _, group := range supplementalGids { - hostConfig.GroupAdd = append(hostConfig.GroupAdd, strconv.Itoa(int(group))) - } - } - - // Apply effective security context for container - effectiveSC := securitycontext.DetermineEffectiveSecurityContext(pod, container) - if effectiveSC == nil { - return - } - - if effectiveSC.Privileged != nil { - hostConfig.Privileged = *effectiveSC.Privileged - } - - if effectiveSC.Capabilities != nil { - add, drop := kubecontainer.MakeCapabilities(effectiveSC.Capabilities.Add, effectiveSC.Capabilities.Drop) - hostConfig.CapAdd = add - hostConfig.CapDrop = drop - } - - if effectiveSC.SELinuxOptions != nil { - hostConfig.SecurityOpt = ModifySecurityOptions(hostConfig.SecurityOpt, effectiveSC.SELinuxOptions, p.securityOptSeparator) - } -} - -// ModifySecurityOptions adds SELinux options to config using the given -// separator. -func ModifySecurityOptions(config []string, selinuxOpts *v1.SELinuxOptions, separator rune) []string { - // Note, strictly speaking, we are actually mutating the values of these - // keys, rather than formatting name and value into a string. Docker re- - // uses the same option name multiple times (it's just 'label') with - // different values which are themselves key-value pairs. For example, - // the SELinux type is represented by the security opt: - // - // labeltype: - // - // In Docker API versions before 1.23, the separator was the `:` rune; in - // API version 1.23 it changed to the `=` rune. 
- config = modifySecurityOption(config, DockerLabelUser(separator), selinuxOpts.User) - config = modifySecurityOption(config, DockerLabelRole(separator), selinuxOpts.Role) - config = modifySecurityOption(config, DockerLabelType(separator), selinuxOpts.Type) - config = modifySecurityOption(config, DockerLabelLevel(separator), selinuxOpts.Level) - - return config -} - -// modifySecurityOption adds the security option of name to the config array -// with value in the form of name:value. -func modifySecurityOption(config []string, name, value string) []string { - if len(value) > 0 { - config = append(config, fmt.Sprintf("%s:%s", name, value)) - } - - return config -} diff --git a/pkg/kubelet/dockershim/securitycontext/provider_test.go b/pkg/kubelet/dockershim/securitycontext/provider_test.go deleted file mode 100644 index 1113ac25d87..00000000000 --- a/pkg/kubelet/dockershim/securitycontext/provider_test.go +++ /dev/null @@ -1,334 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package securitycontext - -import ( - "fmt" - "reflect" - "strconv" - "testing" - - dockercontainer "github.com/docker/engine-api/types/container" - "k8s.io/api/core/v1" - apitesting "k8s.io/kubernetes/pkg/api/testing" -) - -func TestModifyContainerConfig(t *testing.T) { - userID := int64(123) - overrideUserID := int64(321) - - cases := []struct { - name string - podSc *v1.PodSecurityContext - sc *v1.SecurityContext - expected *dockercontainer.Config - }{ - { - name: "container.SecurityContext.RunAsUser set", - sc: &v1.SecurityContext{ - RunAsUser: &userID, - }, - expected: &dockercontainer.Config{ - User: strconv.FormatInt(int64(userID), 10), - }, - }, - { - name: "no RunAsUser value set", - sc: &v1.SecurityContext{}, - expected: &dockercontainer.Config{}, - }, - { - name: "pod.Spec.SecurityContext.RunAsUser set", - podSc: &v1.PodSecurityContext{ - RunAsUser: &userID, - }, - expected: &dockercontainer.Config{ - User: strconv.FormatInt(int64(userID), 10), - }, - }, - { - name: "container.SecurityContext.RunAsUser overrides pod.Spec.SecurityContext.RunAsUser", - podSc: &v1.PodSecurityContext{ - RunAsUser: &userID, - }, - sc: &v1.SecurityContext{ - RunAsUser: &overrideUserID, - }, - expected: &dockercontainer.Config{ - User: strconv.FormatInt(int64(overrideUserID), 10), - }, - }, - } - - provider := NewSimpleSecurityContextProvider('=') - dummyContainer := &v1.Container{} - for _, tc := range cases { - pod := &v1.Pod{Spec: v1.PodSpec{SecurityContext: tc.podSc}} - dummyContainer.SecurityContext = tc.sc - dockerCfg := &dockercontainer.Config{} - - provider.ModifyContainerConfig(pod, dummyContainer, dockerCfg) - - if e, a := tc.expected, dockerCfg; !reflect.DeepEqual(e, a) { - t.Errorf("%v: unexpected modification of docker config\nExpected:\n\n%#v\n\nGot:\n\n%#v", tc.name, e, a) - } - } -} - -func TestModifyHostConfig(t *testing.T) { - priv := true - setPrivSC := &v1.SecurityContext{} - setPrivSC.Privileged = &priv - setPrivHC := &dockercontainer.HostConfig{ - 
Privileged: true, - } - - setCapsHC := &dockercontainer.HostConfig{ - CapAdd: []string{"addCapA", "addCapB"}, - CapDrop: []string{"dropCapA", "dropCapB"}, - } - - setSELinuxHC := &dockercontainer.HostConfig{} - setSELinuxHC.SecurityOpt = []string{ - fmt.Sprintf("%s:%s", DockerLabelUser(':'), "user"), - fmt.Sprintf("%s:%s", DockerLabelRole(':'), "role"), - fmt.Sprintf("%s:%s", DockerLabelType(':'), "type"), - fmt.Sprintf("%s:%s", DockerLabelLevel(':'), "level"), - } - - // seLinuxLabelsSC := fullValidSecurityContext() - // seLinuxLabelsHC := fullValidHostConfig() - - cases := []struct { - name string - podSc *v1.PodSecurityContext - sc *v1.SecurityContext - expected *dockercontainer.HostConfig - }{ - { - name: "fully set container.SecurityContext", - sc: fullValidSecurityContext(), - expected: fullValidHostConfig(), - }, - { - name: "container.SecurityContext.Privileged", - sc: setPrivSC, - expected: setPrivHC, - }, - { - name: "container.SecurityContext.Capabilities", - sc: &v1.SecurityContext{ - Capabilities: inputCapabilities(), - }, - expected: setCapsHC, - }, - { - name: "container.SecurityContext.SELinuxOptions", - sc: &v1.SecurityContext{ - SELinuxOptions: inputSELinuxOptions(), - }, - expected: setSELinuxHC, - }, - { - name: "pod.Spec.SecurityContext.SELinuxOptions", - podSc: &v1.PodSecurityContext{ - SELinuxOptions: inputSELinuxOptions(), - }, - expected: setSELinuxHC, - }, - { - name: "container.SecurityContext overrides pod.Spec.SecurityContext", - podSc: overridePodSecurityContext(), - sc: fullValidSecurityContext(), - expected: fullValidHostConfig(), - }, - } - - provider := NewSimpleSecurityContextProvider(':') - dummyContainer := &v1.Container{} - - for _, tc := range cases { - pod := &v1.Pod{Spec: v1.PodSpec{SecurityContext: tc.podSc}} - dummyContainer.SecurityContext = tc.sc - dockerCfg := &dockercontainer.HostConfig{} - - provider.ModifyHostConfig(pod, dummyContainer, dockerCfg, nil) - - if e, a := tc.expected, dockerCfg; !reflect.DeepEqual(e, a) { 
- t.Errorf("%v: unexpected modification of host config\nExpected:\n\n%#v\n\nGot:\n\n%#v", tc.name, e, a) - } - } -} - -func TestModifyHostConfigPodSecurityContext(t *testing.T) { - supplementalGroupsSC := &v1.PodSecurityContext{} - supplementalGroupsSC.SupplementalGroups = []int64{2222} - supplementalGroupHC := fullValidHostConfig() - supplementalGroupHC.GroupAdd = []string{"2222"} - fsGroupHC := fullValidHostConfig() - fsGroupHC.GroupAdd = []string{"1234"} - extraSupplementalGroupHC := fullValidHostConfig() - extraSupplementalGroupHC.GroupAdd = []string{"1234"} - bothHC := fullValidHostConfig() - bothHC.GroupAdd = []string{"2222", "1234"} - fsGroup := int64(1234) - extraSupplementalGroup := []int64{1234} - - testCases := map[string]struct { - securityContext *v1.PodSecurityContext - expected *dockercontainer.HostConfig - extraSupplementalGroups []int64 - }{ - "nil": { - securityContext: nil, - expected: fullValidHostConfig(), - extraSupplementalGroups: nil, - }, - "SupplementalGroup": { - securityContext: supplementalGroupsSC, - expected: supplementalGroupHC, - extraSupplementalGroups: nil, - }, - "FSGroup": { - securityContext: &v1.PodSecurityContext{FSGroup: &fsGroup}, - expected: fsGroupHC, - extraSupplementalGroups: nil, - }, - "FSGroup + SupplementalGroups": { - securityContext: &v1.PodSecurityContext{ - SupplementalGroups: []int64{2222}, - FSGroup: &fsGroup, - }, - expected: bothHC, - extraSupplementalGroups: nil, - }, - "ExtraSupplementalGroup": { - securityContext: nil, - expected: extraSupplementalGroupHC, - extraSupplementalGroups: extraSupplementalGroup, - }, - "ExtraSupplementalGroup + SupplementalGroups": { - securityContext: supplementalGroupsSC, - expected: bothHC, - extraSupplementalGroups: extraSupplementalGroup, - }, - } - - provider := NewSimpleSecurityContextProvider(':') - dummyContainer := &v1.Container{} - dummyContainer.SecurityContext = fullValidSecurityContext() - dummyPod := &v1.Pod{ - Spec: apitesting.V1DeepEqualSafePodSpec(), - } - - 
for k, v := range testCases { - dummyPod.Spec.SecurityContext = v.securityContext - dockerCfg := &dockercontainer.HostConfig{} - provider.ModifyHostConfig(dummyPod, dummyContainer, dockerCfg, v.extraSupplementalGroups) - if !reflect.DeepEqual(v.expected, dockerCfg) { - t.Errorf("unexpected modification of host config for %s. Expected: %#v Got: %#v", k, v.expected, dockerCfg) - } - } -} - -func TestModifySecurityOption(t *testing.T) { - testCases := []struct { - name string - config []string - optName string - optVal string - expected []string - }{ - { - name: "Empty val", - config: []string{"a:b", "c:d"}, - optName: "optA", - optVal: "", - expected: []string{"a:b", "c:d"}, - }, - { - name: "Valid", - config: []string{"a:b", "c:d"}, - optName: "e", - optVal: "f", - expected: []string{"a:b", "c:d", "e:f"}, - }, - } - - for _, tc := range testCases { - actual := modifySecurityOption(tc.config, tc.optName, tc.optVal) - if !reflect.DeepEqual(tc.expected, actual) { - t.Errorf("Failed to apply options correctly for tc: %s. 
Expected: %v but got %v", tc.name, tc.expected, actual) - } - } -} - -func overridePodSecurityContext() *v1.PodSecurityContext { - return &v1.PodSecurityContext{ - SELinuxOptions: &v1.SELinuxOptions{ - User: "user2", - Role: "role2", - Type: "type2", - Level: "level2", - }, - } -} - -func fullValidPodSecurityContext() *v1.PodSecurityContext { - return &v1.PodSecurityContext{ - SELinuxOptions: inputSELinuxOptions(), - } -} - -func fullValidSecurityContext() *v1.SecurityContext { - priv := true - return &v1.SecurityContext{ - Privileged: &priv, - Capabilities: inputCapabilities(), - SELinuxOptions: inputSELinuxOptions(), - } -} - -func inputCapabilities() *v1.Capabilities { - return &v1.Capabilities{ - Add: []v1.Capability{"addCapA", "addCapB"}, - Drop: []v1.Capability{"dropCapA", "dropCapB"}, - } -} - -func inputSELinuxOptions() *v1.SELinuxOptions { - return &v1.SELinuxOptions{ - User: "user", - Role: "role", - Type: "type", - Level: "level", - } -} - -func fullValidHostConfig() *dockercontainer.HostConfig { - return &dockercontainer.HostConfig{ - Privileged: true, - CapAdd: []string{"addCapA", "addCapB"}, - CapDrop: []string{"dropCapA", "dropCapB"}, - SecurityOpt: []string{ - fmt.Sprintf("%s:%s", DockerLabelUser(':'), "user"), - fmt.Sprintf("%s:%s", DockerLabelRole(':'), "role"), - fmt.Sprintf("%s:%s", DockerLabelType(':'), "type"), - fmt.Sprintf("%s:%s", DockerLabelLevel(':'), "level"), - }, - } -} diff --git a/pkg/kubelet/dockershim/securitycontext/types.go b/pkg/kubelet/dockershim/securitycontext/types.go deleted file mode 100644 index bf4768a7752..00000000000 --- a/pkg/kubelet/dockershim/securitycontext/types.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package securitycontext - -import ( - "k8s.io/api/core/v1" - - dockercontainer "github.com/docker/engine-api/types/container" -) - -type SecurityContextProvider interface { - // ModifyContainerConfig is called before the Docker createContainer call. - // The security context provider can make changes to the Config with which - // the container is created. - ModifyContainerConfig(pod *v1.Pod, container *v1.Container, config *dockercontainer.Config) - - // ModifyHostConfig is called before the Docker createContainer call. - // The security context provider can make changes to the HostConfig, affecting - // security options, whether the container is privileged, volume binds, etc. - // An error is returned if it's not possible to secure the container as requested - // with a security context. - // - // - pod: the pod to modify the docker hostconfig for - // - container: the container to modify the hostconfig for - // - supplementalGids: additional supplemental GIDs associated with the pod's volumes - ModifyHostConfig(pod *v1.Pod, container *v1.Container, hostConfig *dockercontainer.HostConfig, supplementalGids []int64) -} diff --git a/pkg/kubelet/dockershim/securitycontext/util.go b/pkg/kubelet/dockershim/securitycontext/util.go deleted file mode 100644 index faf6ed53925..00000000000 --- a/pkg/kubelet/dockershim/securitycontext/util.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package securitycontext - -import ( - "fmt" -) - -// DockerLabelUser returns the fragment of a Docker security opt that -// describes the SELinux user. Note that strictly speaking this is not -// actually the name of the security opt, but a fragment of the whole key- -// value pair necessary to set the opt. -func DockerLabelUser(separator rune) string { - return fmt.Sprintf("label%cuser", separator) -} - -// DockerLabelRole returns the fragment of a Docker security opt that -// describes the SELinux role. Note that strictly speaking this is not -// actually the name of the security opt, but a fragment of the whole key- -// value pair necessary to set the opt. -func DockerLabelRole(separator rune) string { - return fmt.Sprintf("label%crole", separator) -} - -// DockerLabelType returns the fragment of a Docker security opt that -// describes the SELinux type. Note that strictly speaking this is not -// actually the name of the security opt, but a fragment of the whole key- -// value pair necessary to set the opt. -func DockerLabelType(separator rune) string { - return fmt.Sprintf("label%ctype", separator) -} - -// DockerLabelLevel returns the fragment of a Docker security opt that -// describes the SELinux level. Note that strictly speaking this is not -// actually the name of the security opt, but a fragment of the whole key- -// value pair necessary to set the opt. -func DockerLabelLevel(separator rune) string { - return fmt.Sprintf("label%clevel", separator) -} - -// DockerLaelDisable returns the Docker security opt that disables SELinux for -// the container. 
-func DockerLabelDisable(separator rune) string { - return fmt.Sprintf("label%cdisable", separator) -} diff --git a/pkg/kubelet/dockershim/selinux_util.go b/pkg/kubelet/dockershim/selinux_util.go new file mode 100644 index 00000000000..e8e2a07f623 --- /dev/null +++ b/pkg/kubelet/dockershim/selinux_util.go @@ -0,0 +1,92 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockershim + +import ( + "fmt" + + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" +) + +// selinuxLabelUser returns the fragment of a Docker security opt that +// describes the SELinux user. Note that strictly speaking this is not +// actually the name of the security opt, but a fragment of the whole key- +// value pair necessary to set the opt. +func selinuxLabelUser(separator rune) string { + return fmt.Sprintf("label%cuser", separator) +} + +// selinuxLabelRole returns the fragment of a Docker security opt that +// describes the SELinux role. Note that strictly speaking this is not +// actually the name of the security opt, but a fragment of the whole key- +// value pair necessary to set the opt. +func selinuxLabelRole(separator rune) string { + return fmt.Sprintf("label%crole", separator) +} + +// selinuxLabelType returns the fragment of a Docker security opt that +// describes the SELinux type. 
Note that strictly speaking this is not +// actually the name of the security opt, but a fragment of the whole key- +// value pair necessary to set the opt. +func selinuxLabelType(separator rune) string { + return fmt.Sprintf("label%ctype", separator) +} + +// selinuxLabelLevel returns the fragment of a Docker security opt that +// describes the SELinux level. Note that strictly speaking this is not +// actually the name of the security opt, but a fragment of the whole key- +// value pair necessary to set the opt. +func selinuxLabelLevel(separator rune) string { + return fmt.Sprintf("label%clevel", separator) +} + +// selinuxLabelDisable returns the Docker security opt that disables SELinux for +// the container. +func selinuxLabelDisable(separator rune) string { + return fmt.Sprintf("label%cdisable", separator) +} + +// addSELinuxOptions adds SELinux options to config using the given +// separator. +func addSELinuxOptions(config []string, selinuxOpts *runtimeapi.SELinuxOption, separator rune) []string { + // Note, strictly speaking, we are actually mutating the values of these + // keys, rather than formatting name and value into a string. Docker re- + // uses the same option name multiple times (it's just 'label') with + // different values which are themselves key-value pairs. For example, + // the SELinux type is represented by the security opt: + // + // labeltype: + // + // In Docker API versions before 1.23, the separator was the `:` rune; in + // API version 1.23 it changed to the `=` rune. 
+ config = modifySecurityOption(config, selinuxLabelUser(separator), selinuxOpts.User) + config = modifySecurityOption(config, selinuxLabelRole(separator), selinuxOpts.Role) + config = modifySecurityOption(config, selinuxLabelType(separator), selinuxOpts.Type) + config = modifySecurityOption(config, selinuxLabelLevel(separator), selinuxOpts.Level) + + return config +} + +// modifySecurityOption adds the security option of name to the config array +// with value in the form of name:value. +func modifySecurityOption(config []string, name, value string) []string { + if len(value) > 0 { + config = append(config, fmt.Sprintf("%s:%s", name, value)) + } + + return config +} diff --git a/pkg/kubelet/dockershim/selinux_util_test.go b/pkg/kubelet/dockershim/selinux_util_test.go new file mode 100644 index 00000000000..93ef091723e --- /dev/null +++ b/pkg/kubelet/dockershim/selinux_util_test.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dockershim + +import ( + "reflect" + "testing" +) + +func TestModifySecurityOptions(t *testing.T) { + testCases := []struct { + name string + config []string + optName string + optVal string + expected []string + }{ + { + name: "Empty val", + config: []string{"a:b", "c:d"}, + optName: "optA", + optVal: "", + expected: []string{"a:b", "c:d"}, + }, + { + name: "Valid", + config: []string{"a:b", "c:d"}, + optName: "e", + optVal: "f", + expected: []string{"a:b", "c:d", "e:f"}, + }, + } + + for _, tc := range testCases { + actual := modifySecurityOption(tc.config, tc.optName, tc.optVal) + if !reflect.DeepEqual(tc.expected, actual) { + t.Errorf("Failed to apply options correctly for tc: %s. Expected: %v but got %v", tc.name, tc.expected, actual) + } + } +} diff --git a/pkg/kubelet/gpu/OWNERS b/pkg/kubelet/gpu/OWNERS new file mode 100644 index 00000000000..ceb7c93336d --- /dev/null +++ b/pkg/kubelet/gpu/OWNERS @@ -0,0 +1,8 @@ +approvers: +- dchen1107 +- derekwaynecarr +- vishh +- yujuhong +reviewers: +- cmluciano +- sig-node-reviewers \ No newline at end of file diff --git a/pkg/kubelet/gpu/nvidia/BUILD b/pkg/kubelet/gpu/nvidia/BUILD index cb136291152..c785ddebad1 100644 --- a/pkg/kubelet/gpu/nvidia/BUILD +++ b/pkg/kubelet/gpu/nvidia/BUILD @@ -44,6 +44,7 @@ go_test( library = ":go_default_library", tags = ["automanaged"], deps = [ + "//pkg/kubelet/dockershim/libdocker:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/kubelet/gpu/nvidia/nvidia_gpu_manager_test.go b/pkg/kubelet/gpu/nvidia/nvidia_gpu_manager_test.go index d8613de527c..dc7253f0940 100644 --- a/pkg/kubelet/gpu/nvidia/nvidia_gpu_manager_test.go +++ b/pkg/kubelet/gpu/nvidia/nvidia_gpu_manager_test.go @@ -17,6 +17,8 @@ limitations under the License. 
package nvidia import ( + "os" + "reflect" "testing" "github.com/stretchr/testify/assert" @@ -26,6 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" ) type testActivePodsLister struct { @@ -60,6 +63,34 @@ func makeTestPod(numContainers, gpusPerContainer int) *v1.Pod { return pod } +func TestNewNvidiaGPUManager(t *testing.T) { + podLister := &testActivePodsLister{} + + // Expects nil GPUManager and an error with nil dockerClient. + testGpuManager1, err := NewNvidiaGPUManager(podLister, nil) + as := assert.New(t) + as.Nil(testGpuManager1) + as.NotNil(err) + + // Expects a GPUManager to be created with non-nil dockerClient. + fakeDocker := libdocker.NewFakeDockerClient() + testGpuManager2, err := NewNvidiaGPUManager(podLister, fakeDocker) + as.NotNil(testGpuManager2) + as.Nil(err) + + // Expects zero capacity without any GPUs. + gpuCapacity := testGpuManager2.Capacity() + as.Equal(len(gpuCapacity), 1) + rgpu := gpuCapacity[v1.ResourceNvidiaGPU] + as.Equal(rgpu.Value(), int64(0)) + + err2 := testGpuManager2.Start() + if !os.IsNotExist(err2) { + gpus := reflect.ValueOf(testGpuManager2).Elem().FieldByName("allGPUs").Len() + as.NotZero(gpus) + } +} + func TestMultiContainerPodGPUAllocation(t *testing.T) { podLister := &testActivePodsLister{} diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index c76cb7532cd..766af05eb41 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -18,8 +18,6 @@ package kubelet import ( "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" "fmt" "net" "net/http" @@ -38,7 +36,6 @@ import ( cadvisorapi "github.com/google/cadvisor/info/v1" cadvisorapiv2 "github.com/google/cadvisor/info/v2" - certificates "k8s.io/api/certificates/v1beta1" "k8s.io/api/core/v1" clientv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -60,7 +57,6 @@ import ( 
"k8s.io/kubernetes/pkg/apis/componentconfig" componentconfigv1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - clientcertificates "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1" corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/features" @@ -102,7 +98,6 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util/sliceutils" "k8s.io/kubernetes/pkg/kubelet/volumemanager" "k8s.io/kubernetes/pkg/security/apparmor" - "k8s.io/kubernetes/pkg/util/bandwidth" utildbus "k8s.io/kubernetes/pkg/util/dbus" utilexec "k8s.io/kubernetes/pkg/util/exec" kubeio "k8s.io/kubernetes/pkg/util/io" @@ -711,7 +706,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub } ips = append(ips, cloudIPs...) names := append([]string{klet.GetHostname(), hostnameOverride}, cloudNames...) - klet.serverCertificateManager, err = initializeServerCertificateManager(klet.kubeClient, kubeCfg, klet.nodeName, ips, names) + klet.serverCertificateManager, err = certificate.NewKubeletServerCertificateManager(klet.kubeClient, kubeCfg, klet.nodeName, ips, names) if err != nil { return nil, fmt.Errorf("failed to initialize certificate manager: %v", err) } @@ -1027,10 +1022,6 @@ type Kubelet struct { // clusterDomain and clusterDNS. resolverConfig string - // Optionally shape the bandwidth of a pod - // TODO: remove when kubenet plugin is ready - shaper bandwidth.BandwidthShaper - // Information about the ports which are opened by daemons on Node running this Kubelet server. 
daemonEndpoints *v1.NodeDaemonEndpoints @@ -1116,48 +1107,6 @@ type Kubelet struct { dockerLegacyService dockershim.DockerLegacyService } -func initializeServerCertificateManager(kubeClient clientset.Interface, kubeCfg *componentconfig.KubeletConfiguration, nodeName types.NodeName, ips []net.IP, hostnames []string) (certificate.Manager, error) { - var certSigningRequestClient clientcertificates.CertificateSigningRequestInterface - if kubeClient != nil && kubeClient.Certificates() != nil { - certSigningRequestClient = kubeClient.Certificates().CertificateSigningRequests() - } - certificateStore, err := certificate.NewFileStore( - "kubelet-server", - kubeCfg.CertDirectory, - kubeCfg.CertDirectory, - kubeCfg.TLSCertFile, - kubeCfg.TLSPrivateKeyFile) - if err != nil { - return nil, fmt.Errorf("failed to initialize certificate store: %v", err) - } - return certificate.NewManager(&certificate.Config{ - CertificateSigningRequestClient: certSigningRequestClient, - Template: &x509.CertificateRequest{ - Subject: pkix.Name{ - CommonName: fmt.Sprintf("system:node:%s", nodeName), - Organization: []string{"system:nodes"}, - }, - DNSNames: hostnames, - IPAddresses: ips, - }, - Usages: []certificates.KeyUsage{ - // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // - // Digital signature allows the certificate to be used to verify - // digital signatures used during TLS negotiation. - certificates.UsageDigitalSignature, - // KeyEncipherment allows the cert/key pair to be used to encrypt - // keys, including the symetric keys negotiated during TLS setup - // and used for data transfer. - certificates.UsageKeyEncipherment, - // ServerAuth allows the cert to be used by a TLS server to - // authenticate itself to a TLS client. 
- certificates.UsageServerAuth, - }, - CertificateStore: certificateStore, - }) -} - func allLocalIPsWithoutLoopback() ([]net.IP, error) { interfaces, err := net.Interfaces() if err != nil { @@ -1301,7 +1250,7 @@ func (kl *Kubelet) initializeRuntimeDependentModules() { glog.Fatalf("Failed to start cAdvisor %v", err) } // eviction manager must start after cadvisor because it needs to know if the container runtime has a dedicated imagefs - kl.evictionManager.Start(kl, kl.GetActivePods, kl.podResourcesAreReclaimed, kl, evictionMonitoringPeriod) + kl.evictionManager.Start(kl.cadvisor, kl.GetActivePods, kl.podResourcesAreReclaimed, kl, evictionMonitoringPeriod) } // Run starts the kubelet reacting to config updates @@ -1632,28 +1581,6 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { return err } - // early successful exit if pod is not bandwidth-constrained - if !kl.shapingEnabled() { - return nil - } - - // Update the traffic shaping for the pod's ingress and egress limits - ingress, egress, err := bandwidth.ExtractPodBandwidthResources(pod.Annotations) - if err != nil { - return err - } - if egress != nil || ingress != nil { - if kubecontainer.IsHostNetworkPod(pod) { - kl.recorder.Event(pod, v1.EventTypeWarning, events.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network") - } else if kl.shaper != nil { - if len(apiPodStatus.PodIP) > 0 { - err = kl.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", apiPodStatus.PodIP), egress, ingress) - } - } else { - kl.recorder.Event(pod, v1.EventTypeWarning, events.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined") - } - } - return nil } diff --git a/pkg/kubelet/kubelet_cadvisor.go b/pkg/kubelet/kubelet_cadvisor.go index dcb3de9728e..779095f5675 100644 --- a/pkg/kubelet/kubelet_cadvisor.go +++ b/pkg/kubelet/kubelet_cadvisor.go @@ -45,19 +45,6 @@ func (kl *Kubelet) GetContainerInfo(podFullName string, podUID types.UID, contai return &ci, nil } -// HasDedicatedImageFs 
returns true if the imagefs has a dedicated device. -func (kl *Kubelet) HasDedicatedImageFs() (bool, error) { - imageFsInfo, err := kl.ImagesFsInfo() - if err != nil { - return false, err - } - rootFsInfo, err := kl.RootFsInfo() - if err != nil { - return false, err - } - return imageFsInfo.Device != rootFsInfo.Device, nil -} - // GetContainerInfoV2 returns stats (from Cadvisor) for containers. func (kl *Kubelet) GetContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) { return kl.cadvisor.ContainerInfoV2(name, options) diff --git a/pkg/kubelet/kubelet_cadvisor_test.go b/pkg/kubelet/kubelet_cadvisor_test.go index bfbdc9e8812..aa269f68f9d 100644 --- a/pkg/kubelet/kubelet_cadvisor_test.go +++ b/pkg/kubelet/kubelet_cadvisor_test.go @@ -24,7 +24,6 @@ import ( "github.com/stretchr/testify/require" cadvisorapi "github.com/google/cadvisor/info/v1" - cadvisorapiv2 "github.com/google/cadvisor/info/v2" "k8s.io/apimachinery/pkg/types" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainertest "k8s.io/kubernetes/pkg/kubelet/container/testing" @@ -251,34 +250,3 @@ func TestGetRawContainerInfoSubcontainers(t *testing.T) { assert.Len(t, result, 2) mockCadvisor.AssertExpectations(t) } - -func TestHasDedicatedImageFs(t *testing.T) { - testCases := map[string]struct { - imageFsInfo cadvisorapiv2.FsInfo - rootFsInfo cadvisorapiv2.FsInfo - expected bool - }{ - "has-dedicated-image-fs": { - imageFsInfo: cadvisorapiv2.FsInfo{Device: "123"}, - rootFsInfo: cadvisorapiv2.FsInfo{Device: "456"}, - expected: true, - }, - "has-unified-image-fs": { - imageFsInfo: cadvisorapiv2.FsInfo{Device: "123"}, - rootFsInfo: cadvisorapiv2.FsInfo{Device: "123"}, - expected: false, - }, - } - for testName, testCase := range testCases { - testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) - defer testKubelet.Cleanup() - kubelet := testKubelet.kubelet - mockCadvisor := testKubelet.fakeCadvisor - 
mockCadvisor.On("Start").Return(nil) - mockCadvisor.On("ImagesFsInfo").Return(testCase.imageFsInfo, nil) - mockCadvisor.On("RootFsInfo").Return(testCase.rootFsInfo, nil) - actual, err := kubelet.HasDedicatedImageFs() - assert.NoError(t, err, "test [%s]", testName) - assert.Equal(t, testCase.expected, actual, "test [%s]", testName) - } -} diff --git a/pkg/kubelet/kubelet_network.go b/pkg/kubelet/kubelet_network.go index 5ac13653ac5..4e1a971bae5 100644 --- a/pkg/kubelet/kubelet_network.go +++ b/pkg/kubelet/kubelet_network.go @@ -25,10 +25,8 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/pkg/kubelet/network" - "k8s.io/kubernetes/pkg/util/bandwidth" utiliptables "k8s.io/kubernetes/pkg/util/iptables" ) @@ -244,51 +242,6 @@ func (kl *Kubelet) parseResolvConf(reader io.Reader) (nameservers []string, sear return nameservers, searches, nil } -// cleanupBandwidthLimits updates the status of bandwidth-limited containers -// and ensures that only the appropriate CIDRs are active on the node. -func (kl *Kubelet) cleanupBandwidthLimits(allPods []*v1.Pod) error { - if kl.shaper == nil { - return nil - } - currentCIDRs, err := kl.shaper.GetCIDRs() - if err != nil { - return err - } - possibleCIDRs := sets.String{} - for ix := range allPods { - pod := allPods[ix] - ingress, egress, err := bandwidth.ExtractPodBandwidthResources(pod.Annotations) - if err != nil { - return err - } - if ingress == nil && egress == nil { - glog.V(8).Infof("Not a bandwidth limited container...") - continue - } - status, found := kl.statusManager.GetPodStatus(pod.UID) - if !found { - // TODO(random-liu): Cleanup status get functions. 
(issue #20477) - s, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace) - if err != nil { - return err - } - status = kl.generateAPIPodStatus(pod, s) - } - if status.Phase == v1.PodRunning { - possibleCIDRs.Insert(fmt.Sprintf("%s/32", status.PodIP)) - } - } - for _, cidr := range currentCIDRs { - if !possibleCIDRs.Has(cidr) { - glog.V(2).Infof("Removing CIDR: %s (%v)", cidr, possibleCIDRs) - if err := kl.shaper.Reset(cidr); err != nil { - return err - } - } - } - return nil -} - // syncNetworkStatus updates the network state func (kl *Kubelet) syncNetworkStatus() { // For cri integration, network state will be updated in updateRuntimeUp, @@ -327,25 +280,6 @@ func (kl *Kubelet) updatePodCIDR(cidr string) { kl.runtimeState.setPodCIDR(cidr) } -// shapingEnabled returns whether traffic shaping is enabled. -func (kl *Kubelet) shapingEnabled() bool { - // Disable shaping if a network plugin is defined and supports shaping - if kl.networkPlugin != nil && kl.networkPlugin.Capabilities().Has(network.NET_PLUGIN_CAPABILITY_SHAPING) { - return false - } - // This is not strictly true but we need to figure out how to handle - // bandwidth shaping anyway. If the kubelet doesn't have a networkPlugin, - // it could mean: - // a. the kubelet is responsible for bandwidth shaping - // b. the kubelet is using cri, and the cri has a network plugin - // Today, the only plugin that understands bandwidth shaping is kubenet, and - // it doesn't support bandwidth shaping when invoked through cri, so it - // effectively boils down to letting the kubelet decide how to handle - // shaping annotations. The combination of (cri + network plugin that - // handles bandwidth shaping) may not work because of this. - return true -} - // syncNetworkUtil ensures the network utility are present on host. // Network util includes: // 1. 
In nat table, KUBE-MARK-DROP rule to mark connections for dropping diff --git a/pkg/kubelet/kubelet_network_test.go b/pkg/kubelet/kubelet_network_test.go index dc68b882681..5de1c582eda 100644 --- a/pkg/kubelet/kubelet_network_test.go +++ b/pkg/kubelet/kubelet_network_test.go @@ -26,7 +26,6 @@ import ( "github.com/stretchr/testify/require" "k8s.io/api/core/v1" "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/util/bandwidth" ) func TestNodeIPParam(t *testing.T) { @@ -184,85 +183,6 @@ func TestComposeDNSSearch(t *testing.T) { } } -func TestCleanupBandwidthLimits(t *testing.T) { - testPod := func(name, ingress string) *v1.Pod { - pod := podWithUidNameNs("", name, "") - - if len(ingress) != 0 { - pod.Annotations["kubernetes.io/ingress-bandwidth"] = ingress - } - - return pod - } - - // TODO(random-liu): We removed the test case for pod status not cached here. We should add a higher - // layer status getter function and test that function instead. - tests := []struct { - status *v1.PodStatus - pods []*v1.Pod - inputCIDRs []string - expectResetCIDRs []string - name string - }{ - { - status: &v1.PodStatus{ - PodIP: "1.2.3.4", - Phase: v1.PodRunning, - }, - pods: []*v1.Pod{ - testPod("foo", "10M"), - testPod("bar", ""), - }, - inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"}, - expectResetCIDRs: []string{"2.3.4.5/32", "5.6.7.8/32"}, - name: "pod running", - }, - { - status: &v1.PodStatus{ - PodIP: "1.2.3.4", - Phase: v1.PodFailed, - }, - pods: []*v1.Pod{ - testPod("foo", "10M"), - testPod("bar", ""), - }, - inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"}, - expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"}, - name: "pod not running", - }, - { - status: &v1.PodStatus{ - PodIP: "1.2.3.4", - Phase: v1.PodFailed, - }, - pods: []*v1.Pod{ - testPod("foo", ""), - testPod("bar", ""), - }, - inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"}, - expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"}, - 
name: "no bandwidth limits", - }, - } - for _, test := range tests { - shaper := &bandwidth.FakeShaper{ - CIDRs: test.inputCIDRs, - } - - testKube := newTestKubelet(t, false /* controllerAttachDetachEnabled */) - defer testKube.Cleanup() - testKube.kubelet.shaper = shaper - - for _, pod := range test.pods { - testKube.kubelet.statusManager.SetPodStatus(pod, *test.status) - } - - err := testKube.kubelet.cleanupBandwidthLimits(test.pods) - assert.NoError(t, err, "test [%s]", test.name) - assert.EqualValues(t, test.expectResetCIDRs, shaper.ResetCIDRs, "test[%s]", test.name) - } -} - func TestGetIPTablesMark(t *testing.T) { tests := []struct { bit int diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index 09e4230122f..944c405611a 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -549,6 +549,7 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) { node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity( int64(kl.maxPods), resource.DecimalSI) } + if node.Status.NodeInfo.BootID != "" && node.Status.NodeInfo.BootID != info.BootID { // TODO: This requires a transaction, either both node status is updated @@ -557,25 +558,16 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) { "Node %s has been rebooted, boot id: %s", kl.nodeName, info.BootID) } node.Status.NodeInfo.BootID = info.BootID - } - if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { - rootfs, err := kl.GetCachedRootFsInfo() - if err != nil { - node.Status.Capacity[v1.ResourceStorageScratch] = resource.MustParse("0Gi") - } else { - for rName, rCap := range cadvisor.StorageScratchCapacityFromFsInfo(rootfs) { - node.Status.Capacity[rName] = rCap - } - } - - if hasDedicatedImageFs, _ := kl.HasDedicatedImageFs(); hasDedicatedImageFs { - imagesfs, err := kl.ImagesFsInfo() - if err != nil { - node.Status.Capacity[v1.ResourceStorageOverlay] = resource.MustParse("0Gi") - } else { - for 
rName, rCap := range cadvisor.StorageOverlayCapacityFromFsInfo(imagesfs) { - node.Status.Capacity[rName] = rCap + if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + // TODO: all the node resources should use GetCapacity instead of deriving the + // capacity for every node status request + initialCapacity := kl.containerManager.GetCapacity() + if initialCapacity != nil { + node.Status.Capacity[v1.ResourceStorageScratch] = initialCapacity[v1.ResourceStorageScratch] + imageCapacity, ok := initialCapacity[v1.ResourceStorageOverlay] + if ok { + node.Status.Capacity[v1.ResourceStorageOverlay] = imageCapacity } } } @@ -704,7 +696,7 @@ func (kl *Kubelet) setNodeReadyCondition(node *v1.Node) { } // Append AppArmor status if it's enabled. - // TODO(timstclair): This is a temporary message until node feature reporting is added. + // TODO(tallclair): This is a temporary message until node feature reporting is added. if newNodeReadyCondition.Status == v1.ConditionTrue && kl.appArmorValidator != nil && kl.appArmorValidator.ValidateHost() == nil { newNodeReadyCondition.Message = fmt.Sprintf("%s. 
AppArmor enabled", newNodeReadyCondition.Message) diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go index 69f4278b161..6337e8bd117 100644 --- a/pkg/kubelet/kubelet_node_status_test.go +++ b/pkg/kubelet/kubelet_node_status_test.go @@ -115,12 +115,17 @@ func applyNodeStatusPatch(originalNode *v1.Node, patch []byte) (*v1.Node, error) type localCM struct { cm.ContainerManager allocatable v1.ResourceList + capacity v1.ResourceList } func (lcm *localCM) GetNodeAllocatableReservation() v1.ResourceList { return lcm.allocatable } +func (lcm *localCM) GetCapacity() v1.ResourceList { + return lcm.capacity +} + func TestUpdateNewNodeStatus(t *testing.T) { // generate one more than maxImagesInNodeStatus in inputImageList inputImageList, expectedImageList := generateTestingImageList(maxImagesInNodeStatus + 1) @@ -134,6 +139,10 @@ func TestUpdateNewNodeStatus(t *testing.T) { v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI), }, + capacity: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), + }, } kubeClient := testKubelet.fakeKubeClient existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}} @@ -251,6 +260,18 @@ func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet + kubelet.containerManager = &localCM{ + ContainerManager: cm.NewStubContainerManager(), + allocatable: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI), + }, + capacity: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + v1.ResourceMemory: 
*resource.NewQuantity(20E9, resource.BinarySI), + }, + } + kubeClient := testKubelet.fakeKubeClient existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}} kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain @@ -319,6 +340,10 @@ func TestUpdateExistingNodeStatus(t *testing.T) { v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI), }, + capacity: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI), + }, } kubeClient := testKubelet.fakeKubeClient @@ -503,6 +528,18 @@ func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet + kubelet.containerManager = &localCM{ + ContainerManager: cm.NewStubContainerManager(), + allocatable: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI), + }, + capacity: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI), + }, + } + clock := testKubelet.fakeClock // Do not set nano second, because apiserver function doesn't support nano second. (Only support // RFC3339). 
@@ -658,6 +695,10 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI), }, + capacity: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), + }, } clock := testKubelet.fakeClock @@ -1113,6 +1154,10 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) { allocatable: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(40000, resource.DecimalSI), }, + capacity: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), + }, } kubeClient := testKubelet.fakeKubeClient existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}} diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 2784b433f1c..a84bddbe57f 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -156,7 +156,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h hostPath = filepath.Join(hostPath, mount.SubPath) - if subPathExists, err := util.FileExists(hostPath); err != nil { + if subPathExists, err := util.FileOrSymlinkExists(hostPath); err != nil { glog.Errorf("Could not determine if subPath %s exists; will not attempt to change its permissions", hostPath) } else if !subPathExists { // Create the sub path now because if it's auto-created later when referenced, it may have an @@ -729,9 +729,8 @@ func (kl *Kubelet) getPullSecretsForPod(pod *v1.Pod) []v1.Secret { return pullSecrets } -// Returns true if pod is in the terminated state ("Failed" or "Succeeded"). +// podIsTerminated returns true if pod is in the terminated state ("Failed" or "Succeeded"). 
func (kl *Kubelet) podIsTerminated(pod *v1.Pod) bool { - var status v1.PodStatus // Check the cached pod status which was set after the last sync. status, ok := kl.statusManager.GetPodStatus(pod.UID) if !ok { @@ -893,12 +892,6 @@ func (kl *Kubelet) HandlePodCleanups() error { // Remove any orphaned mirror pods. kl.podManager.DeleteOrphanedMirrorPods() - // Clear out any old bandwidth rules - err = kl.cleanupBandwidthLimits(allPods) - if err != nil { - glog.Errorf("Failed cleaning up bandwidth limits: %v", err) - } - // Remove any cgroups in the hierarchy for pods that are no longer running. if kl.cgroupsPerQOS { kl.cleanupOrphanedPodCgroups(cgroupPods, activePods) @@ -947,7 +940,7 @@ func (kl *Kubelet) podKiller() { } } -// checkHostPortConflicts detects pods with conflicted host ports. +// hasHostPortConflicts detects pods with conflicted host ports. func hasHostPortConflicts(pods []*v1.Pod) bool { ports := sets.String{} for _, pod := range pods { @@ -1424,7 +1417,7 @@ func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containe if container == nil { return nil, fmt.Errorf("container not found (%q)", containerName) } - // TODO(timstclair): Pass a proper timeout value. + // TODO(tallclair): Pass a proper timeout value. 
return kl.runner.RunInContainer(container.ID, cmd, 0) } @@ -1660,7 +1653,7 @@ func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool { if pvc != nil { referencedVolume, err := kl.kubeClient.Core().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) if err != nil { - glog.Warningf("unable to retrieve pvc %s - %v", pvc.Spec.VolumeName, err) + glog.Warningf("unable to retrieve pv %s - %v", pvc.Spec.VolumeName, err) continue } if referencedVolume != nil && referencedVolume.Spec.HostPath != nil { diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index f698fca9288..bf355f3d044 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -1249,10 +1249,21 @@ func TestFilterOutTerminatedPods(t *testing.T) { defer testKubelet.Cleanup() kubelet := testKubelet.kubelet pods := newTestPods(5) + now := metav1.NewTime(time.Now()) pods[0].Status.Phase = v1.PodFailed pods[1].Status.Phase = v1.PodSucceeded + // The pod is terminating, should not filter out. 
pods[2].Status.Phase = v1.PodRunning + pods[2].DeletionTimestamp = &now + pods[2].Status.ContainerStatuses = []v1.ContainerStatus{ + {State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{ + StartedAt: now, + }, + }}, + } pods[3].Status.Phase = v1.PodPending + pods[4].Status.Phase = v1.PodRunning expected := []*v1.Pod{pods[2], pods[3], pods[4]} kubelet.podManager.SetPods(pods) diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go index 27b5b55e864..01127a0c81c 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -76,7 +76,7 @@ func (m *kubeGenericRuntimeManager) recordContainerEvent(pod *v1.Pod, container if containerID != "" { eventMessage = strings.Replace(eventMessage, containerID, container.Name, -1) } - m.recorder.Event(events.ToObjectReference(ref), eventType, reason, eventMessage) + m.recorder.Event(ref, eventType, reason, eventMessage) } // startContainer starts a container and returns a message indicates why it is failed on error. 
@@ -155,7 +155,10 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb msg, handlerErr := m.runner.Run(kubeContainerID, pod, container, container.Lifecycle.PostStart) if handlerErr != nil { m.recordContainerEvent(pod, container, kubeContainerID.ID, v1.EventTypeWarning, events.FailedPostStartHook, msg) - m.killContainer(pod, kubeContainerID, container.Name, "FailedPostStartHook", nil) + if err := m.killContainer(pod, kubeContainerID, container.Name, "FailedPostStartHook", nil); err != nil { + glog.Errorf("Failed to kill container %q(id=%q) in pod %q: %v, %v", + container.Name, kubeContainerID.String(), format.Pod(pod), ErrPostStartHook, err) + } return msg, ErrPostStartHook } } @@ -547,7 +550,10 @@ func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubecontainer.ContainerID, containerName string, reason string, gracePeriodOverride *int64) error { var containerSpec *v1.Container if pod != nil { - containerSpec = kubecontainer.GetContainerSpec(pod, containerName) + if containerSpec = kubecontainer.GetContainerSpec(pod, containerName); containerSpec == nil { + return fmt.Errorf("failed to get containerSpec %q(id=%q) in pod %q when killing container for reason %q", + containerName, containerID.String(), format.Pod(pod), reason) + } } else { // Restore necessary information if one of the specs is nil. restoredPod, restoredContainer, err := m.restoreSpecsFromContainerLabels(containerID) @@ -556,6 +562,7 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec } pod, containerSpec = restoredPod, restoredContainer } + // From this point, pod and container must be non-nil. 
gracePeriod := int64(minimumGracePeriodInSeconds) switch { @@ -757,7 +764,7 @@ func (m *kubeGenericRuntimeManager) GetAttach(id kubecontainer.ContainerID, stdi // RunInContainer synchronously executes the command in the container, and returns the output. func (m *kubeGenericRuntimeManager) RunInContainer(id kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) { stdout, stderr, err := m.runtimeService.ExecSync(id.ID, cmd, 0) - // NOTE(timstclair): This does not correctly interleave stdout & stderr, but should be sufficient + // NOTE(tallclair): This does not correctly interleave stdout & stderr, but should be sufficient // for logging purposes. A combined output option will need to be added to the ExecSyncRequest // if more precise output ordering is ever required. return append(stdout, stderr...), err diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_test.go b/pkg/kubelet/kuberuntime/kuberuntime_container_test.go index 8c79937bf3e..d519ea88ee3 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container_test.go @@ -69,6 +69,41 @@ func TestRemoveContainer(t *testing.T) { assert.Empty(t, containers) } +// TestKillContainer tests killing the container in a Pod. 
+func TestKillContainer(t *testing.T) { + _, _, m, _ := createTestRuntimeManager() + + tests := []struct { + caseName string + pod *v1.Pod + containerID kubecontainer.ContainerID + containerName string + reason string + gracePeriodOverride int64 + succeed bool + }{ + { + caseName: "Failed to find container in pods, expect to return error", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{UID: "pod1_id", Name: "pod1", Namespace: "default"}, + Spec: v1.PodSpec{Containers: []v1.Container{{Name: "empty_container"}}}, + }, + containerID: kubecontainer.ContainerID{Type: "docker", ID: "not_exist_container_id"}, + containerName: "not_exist_container", + reason: "unknown reason", + gracePeriodOverride: 0, + succeed: false, + }, + } + + for _, test := range tests { + err := m.killContainer(test.pod, test.containerID, test.containerName, test.reason, &test.gracePeriodOverride) + if test.succeed != (err == nil) { + t.Errorf("%s: expected %v, got %v (%v)", test.caseName, test.succeed, (err == nil), err) + } + } +} + // TestToKubeContainerStatus tests the converting the CRI container status to // the internal type (i.e., toKubeContainerStatus()) for containers in // different states. diff --git a/pkg/kubelet/lifecycle/handlers.go b/pkg/kubelet/lifecycle/handlers.go index e8541617b65..37da6963373 100644 --- a/pkg/kubelet/lifecycle/handlers.go +++ b/pkg/kubelet/lifecycle/handlers.go @@ -55,7 +55,7 @@ func (hr *HandlerRunner) Run(containerID kubecontainer.ContainerID, pod *v1.Pod, switch { case handler.Exec != nil: var msg string - // TODO(timstclair): Pass a proper timeout value. + // TODO(tallclair): Pass a proper timeout value. 
output, err := hr.commandRunner.RunInContainer(containerID, handler.Exec.Command, 0) if err != nil { msg := fmt.Sprintf("Exec lifecycle hook (%v) for Container %q in Pod %q failed - error: %v, message: %q", handler.Exec.Command, container.Name, format.Pod(pod), err, string(output)) diff --git a/pkg/kubelet/network/hostport/BUILD b/pkg/kubelet/network/hostport/BUILD index 5a5aeb8d2bc..c210dbea125 100644 --- a/pkg/kubelet/network/hostport/BUILD +++ b/pkg/kubelet/network/hostport/BUILD @@ -19,8 +19,6 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/proxy/iptables:go_default_library", - "//pkg/util/dbus:go_default_library", - "//pkg/util/exec:go_default_library", "//pkg/util/iptables:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/kubelet/network/hostport/hostport_manager.go b/pkg/kubelet/network/hostport/hostport_manager.go index 1499ff9c664..c2e588cd0e4 100644 --- a/pkg/kubelet/network/hostport/hostport_manager.go +++ b/pkg/kubelet/network/hostport/hostport_manager.go @@ -27,8 +27,6 @@ import ( "github.com/golang/glog" utilerrors "k8s.io/apimachinery/pkg/util/errors" iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables" - utildbus "k8s.io/kubernetes/pkg/util/dbus" - utilexec "k8s.io/kubernetes/pkg/util/exec" utiliptables "k8s.io/kubernetes/pkg/util/iptables" ) @@ -51,11 +49,10 @@ type hostportManager struct { mu sync.Mutex } -func NewHostportManager() HostPortManager { - iptInterface := utiliptables.New(utilexec.New(), utildbus.New(), utiliptables.ProtocolIpv4) +func NewHostportManager(iptables utiliptables.Interface) HostPortManager { return &hostportManager{ hostPortMap: make(map[hostport]closeable), - iptables: iptInterface, + iptables: iptables, portOpener: openLocalPort, } } diff --git a/pkg/kubelet/network/hostport/hostport_syncer.go b/pkg/kubelet/network/hostport/hostport_syncer.go index d1c577dbdae..03cca1a8d80 100644 --- 
a/pkg/kubelet/network/hostport/hostport_syncer.go +++ b/pkg/kubelet/network/hostport/hostport_syncer.go @@ -27,8 +27,6 @@ import ( "github.com/golang/glog" iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables" - utildbus "k8s.io/kubernetes/pkg/util/dbus" - utilexec "k8s.io/kubernetes/pkg/util/exec" utiliptables "k8s.io/kubernetes/pkg/util/iptables" ) @@ -49,11 +47,10 @@ type hostportSyncer struct { portOpener hostportOpener } -func NewHostportSyncer() HostportSyncer { - iptInterface := utiliptables.New(utilexec.New(), utildbus.New(), utiliptables.ProtocolIpv4) +func NewHostportSyncer(iptables utiliptables.Interface) HostportSyncer { return &hostportSyncer{ hostPortMap: make(map[hostport]closeable), - iptables: iptInterface, + iptables: iptables, portOpener: openLocalPort, } } diff --git a/pkg/kubelet/network/kubenet/kubenet_linux.go b/pkg/kubelet/network/kubenet/kubenet_linux.go index 55a769f71a0..81aa2373b84 100644 --- a/pkg/kubelet/network/kubenet/kubenet_linux.go +++ b/pkg/kubelet/network/kubenet/kubenet_linux.go @@ -118,8 +118,8 @@ func NewPlugin(networkPluginDir string) network.NetworkPlugin { iptables: iptInterface, sysctl: sysctl, vendorDir: networkPluginDir, - hostportSyncer: hostport.NewHostportSyncer(), - hostportManager: hostport.NewHostportManager(), + hostportSyncer: hostport.NewHostportSyncer(iptInterface), + hostportManager: hostport.NewHostportManager(iptInterface), nonMasqueradeCIDR: "10.0.0.0/8", } } @@ -259,7 +259,7 @@ func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interf if err == nil { setHairpin := plugin.hairpinMode == componentconfig.HairpinVeth // Set bridge address to first address in IPNet - cidr.IP.To4()[3] += 1 + cidr.IP[len(cidr.IP)-1] += 1 json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.mtu, network.DefaultInterfaceName, setHairpin, podCIDR, cidr.IP.String()) glog.V(2).Infof("CNI network config set to %v", json) @@ -304,7 +304,7 @@ func (plugin *kubenetNetworkPlugin) Name() string { } func (plugin 
*kubenetNetworkPlugin) Capabilities() utilsets.Int { - return utilsets.NewInt(network.NET_PLUGIN_CAPABILITY_SHAPING) + return utilsets.NewInt() } // setup sets up networking through CNI using the given ns/name and sandbox ID. diff --git a/pkg/kubelet/network/kubenet/kubenet_linux_test.go b/pkg/kubelet/network/kubenet/kubenet_linux_test.go index c3fad8ca3f2..4a1a255b6bd 100644 --- a/pkg/kubelet/network/kubenet/kubenet_linux_test.go +++ b/pkg/kubelet/network/kubenet/kubenet_linux_test.go @@ -125,7 +125,7 @@ func TestGetPodNetworkStatus(t *testing.T) { } } -// TestTeardownBeforeSetUp tests that a `TearDown` call does call +// TestTeardownCallsShaper tests that a `TearDown` call does call // `shaper.Reset` func TestTeardownCallsShaper(t *testing.T) { fexec := &exec.FakeExec{ @@ -232,37 +232,61 @@ func TestGenerateMacAddress(t *testing.T) { // TestInvocationWithoutRuntime invokes the plugin without a runtime. // This is how kubenet is invoked from the cri. func TestTearDownWithoutRuntime(t *testing.T) { - fhost := nettest.NewFakeHost(nil) - fhost.Legacy = false - fhost.Runtime = nil - mockcni := &mock_cni.MockCNI{} - - fexec := &exec.FakeExec{ - CommandScript: []exec.FakeCommandAction{}, - LookPathFunc: func(file string) (string, error) { - return fmt.Sprintf("/fake-bin/%s", file), nil + testCases := []struct { + podCIDR string + ip string + expectedGateway string + }{ + { + podCIDR: "10.0.0.1/24", + ip: "10.0.0.1", + expectedGateway: "10.0.0.1", + }, + { + podCIDR: "2001:beef::1/48", + ip: "2001:beef::1", + expectedGateway: "2001:beef::1", }, } + for _, tc := range testCases { + fhost := nettest.NewFakeHost(nil) + fhost.Legacy = false + fhost.Runtime = nil + mockcni := &mock_cni.MockCNI{} - kubenet := newFakeKubenetPlugin(map[kubecontainer.ContainerID]string{}, fexec, fhost) - kubenet.cniConfig = mockcni - kubenet.iptables = ipttest.NewFake() + fexec := &exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{}, + LookPathFunc: func(file string) (string, error) { + 
return fmt.Sprintf("/fake-bin/%s", file), nil + }, + } - details := make(map[string]interface{}) - details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = "10.0.0.1/24" - kubenet.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details) + kubenet := newFakeKubenetPlugin(map[kubecontainer.ContainerID]string{}, fexec, fhost) + kubenet.cniConfig = mockcni + kubenet.iptables = ipttest.NewFake() - existingContainerID := kubecontainer.BuildContainerID("docker", "123") - kubenet.podIPs[existingContainerID] = "10.0.0.1" + details := make(map[string]interface{}) + details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = tc.podCIDR + kubenet.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details) - mockcni.On("DelNetwork", mock.AnythingOfType("*libcni.NetworkConfig"), mock.AnythingOfType("*libcni.RuntimeConf")).Return(nil) + if kubenet.gateway.String() != tc.expectedGateway { + t.Errorf("generated gateway: %q, expecting: %q", kubenet.gateway.String(), tc.expectedGateway) + } + if kubenet.podCidr != tc.podCIDR { + t.Errorf("generated podCidr: %q, expecting: %q", kubenet.podCidr, tc.podCIDR) + } + existingContainerID := kubecontainer.BuildContainerID("docker", "123") + kubenet.podIPs[existingContainerID] = tc.ip - if err := kubenet.TearDownPod("namespace", "name", existingContainerID); err != nil { - t.Fatalf("Unexpected error in TearDownPod: %v", err) + mockcni.On("DelNetwork", mock.AnythingOfType("*libcni.NetworkConfig"), mock.AnythingOfType("*libcni.RuntimeConf")).Return(nil) + + if err := kubenet.TearDownPod("namespace", "name", existingContainerID); err != nil { + t.Fatalf("Unexpected error in TearDownPod: %v", err) + } + // Assert that the CNI DelNetwork made it through and we didn't crash + // without a runtime. + mockcni.AssertExpectations(t) } - // Assert that the CNI DelNetwork made it through and we didn't crash - // without a runtime. 
- mockcni.AssertExpectations(t) } //TODO: add unit test for each implementation of network plugin interface diff --git a/pkg/kubelet/network/plugins.go b/pkg/kubelet/network/plugins.go index 78b11550c87..53856f78a3c 100644 --- a/pkg/kubelet/network/plugins.go +++ b/pkg/kubelet/network/plugins.go @@ -43,12 +43,6 @@ const DefaultPluginName = "kubernetes.io/no-op" const NET_PLUGIN_EVENT_POD_CIDR_CHANGE = "pod-cidr-change" const NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR = "pod-cidr" -// Plugin capabilities -const ( - // Indicates the plugin handles Kubernetes bandwidth shaping annotations internally - NET_PLUGIN_CAPABILITY_SHAPING int = 1 -) - // Plugin is an interface to network plugins for the kubelet type NetworkPlugin interface { // Init initializes the plugin. This will be called exactly once diff --git a/pkg/kubelet/preemption/preemption_test.go b/pkg/kubelet/preemption/preemption_test.go index ea7db60a626..fcd1950c360 100644 --- a/pkg/kubelet/preemption/preemption_test.go +++ b/pkg/kubelet/preemption/preemption_test.go @@ -458,21 +458,21 @@ func admissionRequirementListEqual(list1 admissionRequirementList, list2 admissi return true } -// this checks if the lists contents contain all of the same elements. -// this is not correct if there are duplicate pods in the list. -// for example: podListEqual([a, a, b], [a, b, b]) will return true +// podListEqual checks if the lists contents contain all of the same elements. 
func podListEqual(list1 []*v1.Pod, list2 []*v1.Pod) bool { if len(list1) != len(list2) { return false } - for _, a := range list1 { - contains := false - for _, b := range list2 { - if a == b { - contains = true - } - } - if !contains { + + m := map[*v1.Pod]int{} + for _, val := range list1 { + m[val] = m[val] + 1 + } + for _, val := range list2 { + m[val] = m[val] - 1 + } + for _, v := range m { + if v != 0 { return false } } diff --git a/pkg/kubelet/prober/prober.go b/pkg/kubelet/prober/prober.go index cf1838a5198..3da2dcbcaec 100644 --- a/pkg/kubelet/prober/prober.go +++ b/pkg/kubelet/prober/prober.go @@ -100,12 +100,12 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c if err != nil { glog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err) if hasRef { - pb.recorder.Eventf(events.ToObjectReference(ref), v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe errored: %v", probeType, err) + pb.recorder.Eventf(ref, v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe errored: %v", probeType, err) } } else { // result != probe.Success glog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output) if hasRef { - pb.recorder.Eventf(events.ToObjectReference(ref), v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe failed: %s", probeType, output) + pb.recorder.Eventf(ref, v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe failed: %s", probeType, output) } } return results.Failure, err diff --git a/pkg/kubelet/rkt/BUILD b/pkg/kubelet/rkt/BUILD index 35abb2b2b73..33f8ca2a865 100644 --- a/pkg/kubelet/rkt/BUILD +++ b/pkg/kubelet/rkt/BUILD @@ -46,7 +46,7 @@ go_library( "//vendor/github.com/coreos/go-systemd/dbus:go_default_library", "//vendor/github.com/coreos/go-systemd/unit:go_default_library", "//vendor/github.com/coreos/rkt/api/v1alpha:go_default_library", - "//vendor/github.com/docker/engine-api/types:go_default_library", + 
"//vendor/github.com/docker/docker/api/types:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", @@ -82,6 +82,7 @@ go_test( "//vendor/github.com/appc/spec/schema:go_default_library", "//vendor/github.com/appc/spec/schema/types:go_default_library", "//vendor/github.com/coreos/go-systemd/dbus:go_default_library", + "//vendor/github.com/coreos/go-systemd/unit:go_default_library", "//vendor/github.com/coreos/rkt/api/v1alpha:go_default_library", "//vendor/github.com/golang/mock/gomock:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/pkg/kubelet/rkt/image.go b/pkg/kubelet/rkt/image.go index 6733bc3246c..225604c6129 100644 --- a/pkg/kubelet/rkt/image.go +++ b/pkg/kubelet/rkt/image.go @@ -30,7 +30,7 @@ import ( appcschema "github.com/appc/spec/schema" appctypes "github.com/appc/spec/schema/types" rktapi "github.com/coreos/rkt/api/v1alpha" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" "github.com/golang/glog" "golang.org/x/net/context" "k8s.io/api/core/v1" diff --git a/pkg/kubelet/rkt/rkt.go b/pkg/kubelet/rkt/rkt.go index 8a1ff5e9e79..6e6addfe033 100644 --- a/pkg/kubelet/rkt/rkt.go +++ b/pkg/kubelet/rkt/rkt.go @@ -95,6 +95,8 @@ const ( k8sRktRestartCountAnno = "rkt.kubernetes.io/restart-count" k8sRktTerminationMessagePathAnno = "rkt.kubernetes.io/termination-message-path" + k8sRktLimitNoFileAnno = "systemd-unit-option.rkt.kubernetes.io/LimitNOFILE" + // TODO(euank): This has significant security concerns as a stage1 image is // effectively root. 
// Furthermore, this (using an annotation) is a hack to pass an extra @@ -1148,6 +1150,23 @@ func constructSyslogIdentifier(generateName string, podName string) string { return podName } +// Setup additional systemd field specified in the Pod Annotation +func setupSystemdCustomFields(annotations map[string]string, unitOptionArray []*unit.UnitOption) ([]*unit.UnitOption, error) { + // LimitNOFILE + if strSize := annotations[k8sRktLimitNoFileAnno]; strSize != "" { + size, err := strconv.Atoi(strSize) + if err != nil { + return unitOptionArray, err + } + if size < 1 { + return unitOptionArray, fmt.Errorf("invalid value for %s: %s", k8sRktLimitNoFileAnno, strSize) + } + unitOptionArray = append(unitOptionArray, newUnitOption("Service", "LimitNOFILE", strSize)) + } + + return unitOptionArray, nil +} + // preparePod will: // // 1. Invoke 'rkt prepare' to prepare the pod, and get the rkt pod uuid. @@ -1235,6 +1254,11 @@ func (r *Runtime) preparePod(pod *v1.Pod, podIP string, pullSecrets []v1.Secret, units = append(units, newUnitOption("Service", "SELinuxContext", selinuxContext)) } + units, err = setupSystemdCustomFields(pod.Annotations, units) + if err != nil { + glog.Warningf("fail to add custom systemd fields provided by pod Annotations: %q", err) + } + serviceName := makePodServiceFileName(uuid) glog.V(4).Infof("rkt: Creating service file %q for pod %q", serviceName, format.Pod(pod)) serviceFile, err := r.os.Create(serviceFilePath(serviceName)) @@ -1271,13 +1295,13 @@ func (r *Runtime) generateEvents(runtimePod *kubecontainer.Pod, reason string, f uuid := utilstrings.ShortenString(id.uuid, 8) switch reason { case "Created": - r.recorder.Eventf(events.ToObjectReference(ref), v1.EventTypeNormal, events.CreatedContainer, "Created with rkt id %v", uuid) + r.recorder.Eventf(ref, v1.EventTypeNormal, events.CreatedContainer, "Created with rkt id %v", uuid) case "Started": - r.recorder.Eventf(events.ToObjectReference(ref), v1.EventTypeNormal, events.StartedContainer, "Started 
with rkt id %v", uuid) + r.recorder.Eventf(ref, v1.EventTypeNormal, events.StartedContainer, "Started with rkt id %v", uuid) case "Failed": - r.recorder.Eventf(events.ToObjectReference(ref), v1.EventTypeWarning, events.FailedToStartContainer, "Failed to start with rkt id %v with error %v", uuid, failure) + r.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToStartContainer, "Failed to start with rkt id %v with error %v", uuid, failure) case "Killing": - r.recorder.Eventf(events.ToObjectReference(ref), v1.EventTypeNormal, events.KillingContainer, "Killing with rkt id %v", uuid) + r.recorder.Eventf(ref, v1.EventTypeNormal, events.KillingContainer, "Killing with rkt id %v", uuid) default: glog.Errorf("rkt: Unexpected event %q", reason) } diff --git a/pkg/kubelet/rkt/rkt_test.go b/pkg/kubelet/rkt/rkt_test.go index 4adc84ae72a..18092169b6a 100644 --- a/pkg/kubelet/rkt/rkt_test.go +++ b/pkg/kubelet/rkt/rkt_test.go @@ -28,6 +28,7 @@ import ( appcschema "github.com/appc/spec/schema" appctypes "github.com/appc/spec/schema/types" + "github.com/coreos/go-systemd/unit" rktapi "github.com/coreos/rkt/api/v1alpha" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" @@ -2075,3 +2076,53 @@ func TestGetPodSystemdServiceFiles(t *testing.T) { } } } + +func TestSetupSystemdCustomFields(t *testing.T) { + testCases := []struct { + unitOpts []*unit.UnitOption + podAnnotations map[string]string + expectedValues []string + raiseErr bool + }{ + // without annotation + { + []*unit.UnitOption{ + {Section: "Service", Name: "ExecStart", Value: "/bin/true"}, + }, + map[string]string{}, + []string{"/bin/true"}, + false, + }, + // with valid annotation for LimitNOFile + { + []*unit.UnitOption{ + {Section: "Service", Name: "ExecStart", Value: "/bin/true"}, + }, + map[string]string{k8sRktLimitNoFileAnno: "1024"}, + []string{"/bin/true", "1024"}, + false, + }, + // with invalid annotation for LimitNOFile + { + []*unit.UnitOption{ + {Section: "Service", Name: "ExecStart", Value: 
"/bin/true"}, + }, + map[string]string{k8sRktLimitNoFileAnno: "-1"}, + []string{"/bin/true"}, + true, + }, + } + + for i, tt := range testCases { + raiseErr := false + newUnitsOpts, err := setupSystemdCustomFields(tt.podAnnotations, tt.unitOpts) + if err != nil { + raiseErr = true + } + assert.Equal(t, tt.raiseErr, raiseErr, fmt.Sprintf("Test case #%d", i)) + for _, opt := range newUnitsOpts { + assert.Equal(t, "Service", opt.Section, fmt.Sprintf("Test case #%d", i)) + assert.Contains(t, tt.expectedValues, opt.Value, fmt.Sprintf("Test case #%d", i)) + } + } +} diff --git a/pkg/kubelet/server/portforward/websocket.go b/pkg/kubelet/server/portforward/websocket.go index 22d5add067a..cb4bca04558 100644 --- a/pkg/kubelet/server/portforward/websocket.go +++ b/pkg/kubelet/server/portforward/websocket.go @@ -158,7 +158,6 @@ type websocketStreamPair struct { // request over a websocket connection type websocketStreamHandler struct { conn *wsstream.Conn - ports []int32 streamPairs []*websocketStreamPair pod string uid types.UID diff --git a/pkg/kubelet/server/remotecommand/httpstream.go b/pkg/kubelet/server/remotecommand/httpstream.go index f09b5e400b5..9d4883212d9 100644 --- a/pkg/kubelet/server/remotecommand/httpstream.go +++ b/pkg/kubelet/server/remotecommand/httpstream.go @@ -423,7 +423,7 @@ func handleResizeEvents(stream io.Reader, channel chan<- remotecommand.TerminalS } } -func v1WriteStatusFunc(stream io.WriteCloser) func(status *apierrors.StatusError) error { +func v1WriteStatusFunc(stream io.Writer) func(status *apierrors.StatusError) error { return func(status *apierrors.StatusError) error { if status.Status().Status == metav1.StatusSuccess { return nil // send error messages @@ -435,7 +435,7 @@ func v1WriteStatusFunc(stream io.WriteCloser) func(status *apierrors.StatusError // v4WriteStatusFunc returns a WriteStatusFunc that marshals a given api Status // as json in the error channel. 
-func v4WriteStatusFunc(stream io.WriteCloser) func(status *apierrors.StatusError) error { +func v4WriteStatusFunc(stream io.Writer) func(status *apierrors.StatusError) error { return func(status *apierrors.StatusError) error { bs, err := json.Marshal(status.Status()) if err != nil { diff --git a/pkg/kubelet/server/server.go b/pkg/kubelet/server/server.go index 0b70b89b441..b9f33a967f9 100644 --- a/pkg/kubelet/server/server.go +++ b/pkg/kubelet/server/server.go @@ -267,8 +267,7 @@ func (s *Server) InstallDefaultHandlers() { healthz.PingHealthz, healthz.NamedCheck("syncloop", s.syncLoopHealthCheck), ) - var ws *restful.WebService - ws = new(restful.WebService) + ws := new(restful.WebService) ws. Path("/pods"). Produces(restful.MIME_JSON) @@ -296,9 +295,8 @@ const pprofBasePath = "/debug/pprof/" // InstallDebuggingHandlers registers the HTTP request patterns that serve logs or run commands/containers func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) { glog.Infof("Adding debug handlers to kubelet server.") - var ws *restful.WebService - ws = new(restful.WebService) + ws := new(restful.WebService) ws. Path("/run") ws.Route(ws.POST("/{podNamespace}/{podID}/{containerName}"). 
@@ -429,7 +427,7 @@ func (s *Server) syncLoopHealthCheck(req *http.Request) error { } enterLoopTime := s.host.LatestLoopEntryTime() if !enterLoopTime.IsZero() && time.Now().After(enterLoopTime.Add(duration)) { - return fmt.Errorf("Sync Loop took longer than expected.") + return fmt.Errorf("sync Loop took longer than expected") } return nil } @@ -595,7 +593,7 @@ func getExecRequestParams(req *restful.Request) execRequestParams { podName: req.PathParameter("podID"), podUID: types.UID(req.PathParameter("uid")), containerName: req.PathParameter("containerName"), - cmd: req.Request.URL.Query()[api.ExecCommandParamm], + cmd: req.Request.URL.Query()[api.ExecCommandParam], } } diff --git a/pkg/kubelet/server/server_test.go b/pkg/kubelet/server/server_test.go index e8b96aa0a11..045a6144b38 100644 --- a/pkg/kubelet/server/server_test.go +++ b/pkg/kubelet/server/server_test.go @@ -248,21 +248,6 @@ func newServerTest() *serverTestFramework { return fw } -// encodeJSON returns obj marshalled as a JSON string, panicing on any errors -func encodeJSON(obj interface{}) string { - data, err := json.Marshal(obj) - if err != nil { - panic(err) - } - return string(data) -} - -func readResp(resp *http.Response) (string, error) { - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - return string(body), err -} - // A helper function to return the correct pod name. 
func getPodName(name, namespace string) string { if namespace == "" { diff --git a/pkg/kubelet/server/server_websocket_test.go b/pkg/kubelet/server/server_websocket_test.go index 080e09fb6b9..058b67d978a 100644 --- a/pkg/kubelet/server/server_websocket_test.go +++ b/pkg/kubelet/server/server_websocket_test.go @@ -172,7 +172,7 @@ func TestServeWSPortForward(t *testing.T) { } if test.containerData != "" { - channel, data, err = wsRead(ws) + _, data, err = wsRead(ws) if err != nil { t.Fatalf("%d: unexpected error reading container data: %v", i, err) } diff --git a/pkg/kubelet/server/stats/summary.go b/pkg/kubelet/server/stats/summary.go index d1d698d92ec..7a27a8b18de 100644 --- a/pkg/kubelet/server/stats/summary.go +++ b/pkg/kubelet/server/stats/summary.go @@ -73,7 +73,7 @@ func (sp *summaryProviderImpl) Get() (*stats.Summary, error) { } } - // TODO(timstclair): Consider returning a best-effort response if any of the following errors + // TODO(tallclair): Consider returning a best-effort response if any of the following errors // occur. 
node, err := sp.provider.GetNode() if err != nil { diff --git a/pkg/kubelet/server/stats/summary_test.go b/pkg/kubelet/server/stats/summary_test.go index be604aff3c4..eb39efd44c2 100644 --- a/pkg/kubelet/server/stats/summary_test.go +++ b/pkg/kubelet/server/stats/summary_test.go @@ -376,13 +376,6 @@ func checkMemoryStats(t *testing.T, label string, seed int, info v2.ContainerInf } } -func checkFsStats(t *testing.T, capacity uint64, Available uint64, inodes uint64, inodesFree uint64, fs *kubestats.FsStats) { - assert.EqualValues(t, capacity, *fs.CapacityBytes) - assert.EqualValues(t, Available, *fs.AvailableBytes) - assert.EqualValues(t, inodesFree, *fs.InodesFree) - assert.EqualValues(t, inodes, *fs.Inodes) -} - func TestCustomMetrics(t *testing.T) { spec := []v1.MetricSpec{ { diff --git a/pkg/kubelet/server/streaming/BUILD b/pkg/kubelet/server/streaming/BUILD index 5fe94b594c1..c2d62d4c3a9 100644 --- a/pkg/kubelet/server/streaming/BUILD +++ b/pkg/kubelet/server/streaming/BUILD @@ -45,9 +45,9 @@ go_test( "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/remotecommand:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + "//vendor/k8s.io/client-go/transport/spdy:go_default_library", ], ) diff --git a/pkg/kubelet/server/streaming/server.go b/pkg/kubelet/server/streaming/server.go index 875e444621f..ed4b6595321 100644 --- a/pkg/kubelet/server/streaming/server.go +++ b/pkg/kubelet/server/streaming/server.go @@ -101,7 +101,7 @@ var DefaultConfig = Config{ SupportedPortForwardProtocols: portforward.SupportedProtocols, } -// TODO(timstclair): Add auth(n/z) interface & handling. +// TODO(tallclair): Add auth(n/z) interface & handling. 
func NewServer(config Config, runtime Runtime) (Server, error) { s := &server{ config: config, @@ -193,7 +193,7 @@ func (s *server) GetPortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi func (s *server) Start(stayUp bool) error { if !stayUp { - // TODO(timstclair): Implement this. + // TODO(tallclair): Implement this. return errors.New("stayUp=false is not yet implemented") } @@ -210,7 +210,7 @@ func (s *server) Start(stayUp bool) error { } func (s *server) Stop() error { - // TODO(timstclair): Implement this. + // TODO(tallclair): Implement this. return errors.New("not yet implemented") } diff --git a/pkg/kubelet/server/streaming/server_test.go b/pkg/kubelet/server/streaming/server_test.go index 806ba01c986..f368e268f29 100644 --- a/pkg/kubelet/server/streaming/server_test.go +++ b/pkg/kubelet/server/streaming/server_test.go @@ -30,9 +30,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" + "k8s.io/client-go/transport/spdy" "k8s.io/kubernetes/pkg/api" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" kubeletportforward "k8s.io/kubernetes/pkg/kubelet/server/portforward" @@ -237,9 +237,10 @@ func TestServePortForward(t *testing.T) { reqURL, err := url.Parse(resp.Url) require.NoError(t, err) - exec, err := remotecommand.NewExecutor(&restclient.Config{}, "POST", reqURL) + transport, upgrader, err := spdy.RoundTripperFor(&restclient.Config{}) require.NoError(t, err) - streamConn, _, err := exec.Dial(kubeletportforward.ProtocolV1Name) + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", reqURL) + streamConn, _, err := dialer.Dial(kubeletportforward.ProtocolV1Name) require.NoError(t, err) defer streamConn.Close() @@ -297,15 +298,14 @@ func runRemoteCommandTest(t *testing.T, commandType string) { go func() { defer wg.Done() - exec, err := 
remotecommand.NewExecutor(&restclient.Config{}, "POST", reqURL) + exec, err := remotecommand.NewSPDYExecutor(&restclient.Config{}, "POST", reqURL) require.NoError(t, err) opts := remotecommand.StreamOptions{ - SupportedProtocols: remotecommandconsts.SupportedStreamingProtocols, - Stdin: stdinR, - Stdout: stdoutW, - Stderr: stderrW, - Tty: false, + Stdin: stdinR, + Stdout: stdoutW, + Stderr: stderrW, + Tty: false, } require.NoError(t, exec.Stream(opts)) }() diff --git a/pkg/kubelet/types/BUILD b/pkg/kubelet/types/BUILD index a9d5a2d42d8..622451e2aa8 100644 --- a/pkg/kubelet/types/BUILD +++ b/pkg/kubelet/types/BUILD @@ -38,6 +38,7 @@ go_test( "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], ) diff --git a/pkg/kubelet/types/pod_update_test.go b/pkg/kubelet/types/pod_update_test.go index 19a3b379575..46d27829f9b 100644 --- a/pkg/kubelet/types/pod_update_test.go +++ b/pkg/kubelet/types/pod_update_test.go @@ -19,7 +19,10 @@ package types import ( "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestGetValidatedSources(t *testing.T) { @@ -42,3 +45,134 @@ func TestGetValidatedSources(t *testing.T) { sources, err = GetValidatedSources([]string{"taco"}) require.Error(t, err) } + +func TestGetPodSource(t *testing.T) { + cases := []struct { + pod v1.Pod + expected string + errExpected bool + }{ + { + pod: v1.Pod{}, + expected: "", + errExpected: true, + }, + { + pod: v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "kubernetes.io/config.source": "host-ipc-sources", + }, + }, + }, + expected: "host-ipc-sources", + errExpected: false, + }, + } + for i, data := range cases { + source, err := GetPodSource(&data.pod) + if 
data.errExpected { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, data.expected, source, "test[%d]", i) + t.Logf("Test case [%d]", i) + } +} + +func TestString(t *testing.T) { + cases := []struct { + sp SyncPodType + expected string + }{ + { + sp: SyncPodCreate, + expected: "create", + }, + { + sp: SyncPodUpdate, + expected: "update", + }, + { + sp: SyncPodSync, + expected: "sync", + }, + { + sp: SyncPodKill, + expected: "kill", + }, + { + sp: 50, + expected: "unknown", + }, + } + for i, data := range cases { + syncPodString := data.sp.String() + assert.Equal(t, data.expected, syncPodString, "test[%d]", i) + t.Logf("Test case [%d]", i) + } +} + +func TestIsCriticalPod(t *testing.T) { + cases := []struct { + pod v1.Pod + expected bool + }{ + { + pod: v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "ns", + Annotations: map[string]string{ + "scheduler.alpha.kubernetes.io/critical-pod": "", + }, + }, + }, + expected: false, + }, + { + pod: v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "ns", + Annotations: map[string]string{ + "scheduler.alpha.kubernetes.io/critical-pod": "abc", + }, + }, + }, + expected: false, + }, + { + pod: v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod3", + Namespace: "kube-system", + Annotations: map[string]string{ + "scheduler.alpha.kubernetes.io/critical-pod": "abc", + }, + }, + }, + expected: false, + }, + { + pod: v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod3", + Namespace: "kube-system", + Annotations: map[string]string{ + "scheduler.alpha.kubernetes.io/critical-pod": "", + }, + }, + }, + expected: true, + }, + } + for i, data := range cases { + actual := IsCriticalPod(&data.pod) + if actual != data.expected { + t.Errorf("IsCriticalPod result wrong:\nexpected: %v\nactual: %v for test[%d] with Annotations: %v", + data.expected, actual, i, data.pod.Annotations) + } + } +} diff --git a/pkg/kubelet/util.go b/pkg/kubelet/util.go index 
514781525e8..3047c2fa619 100644 --- a/pkg/kubelet/util.go +++ b/pkg/kubelet/util.go @@ -88,7 +88,7 @@ func allowHostNetwork(pod *v1.Pod) (bool, error) { return false, nil } -// Determined whether the specified pod is allowed to use host networking +// Determined whether the specified pod is allowed to use host PID func allowHostPID(pod *v1.Pod) (bool, error) { podSource, err := kubetypes.GetPodSource(pod) if err != nil { diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index 5031be77c04..3cdcdc6dd5d 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -260,6 +260,8 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) { return } + allVolumesAdded := true + // Process volume spec for each volume defined in pod for _, podVolume := range pod.Spec.Volumes { volumeSpec, volumeGidValue, err := @@ -270,6 +272,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) { podVolume.Name, format.Pod(pod), err) + allVolumesAdded = false continue } @@ -283,6 +286,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) { volumeSpec.Name(), uniquePodName, err) + allVolumesAdded = false } glog.V(10).Infof( @@ -292,7 +296,11 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) { uniquePodName) } - dswp.markPodProcessed(uniquePodName) + // some of the volume additions may have failed, should not mark this pod as fully processed + if allVolumesAdded { + dswp.markPodProcessed(uniquePodName) + } + } // podPreviouslyProcessed returns true if the volumes for this pod have already @@ -327,6 +335,7 @@ func (dswp *desiredStateOfWorldPopulator) deleteProcessedPod( // createVolumeSpec creates and returns a mutatable volume.Spec object for the // specified volume. 
It dereference any PVC to get PV objects, if needed. +// Returns an error if unable to obtain the volume at this time. func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( podVolume v1.Volume, podNamespace string) (*volume.Spec, string, error) { if pvcSource := @@ -409,6 +418,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV( } if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" { + return "", "", fmt.Errorf( "PVC %s/%s has non-bound phase (%q) or empty pvc.Spec.VolumeName (%q)", namespace, diff --git a/pkg/kubelet/volumemanager/volume_manager_test.go b/pkg/kubelet/volumemanager/volume_manager_test.go index 773f6eb4d52..590b580cda8 100644 --- a/pkg/kubelet/volumemanager/volume_manager_test.go +++ b/pkg/kubelet/volumemanager/volume_manager_test.go @@ -92,6 +92,44 @@ func TestGetMountedVolumesForPodAndGetVolumesInUse(t *testing.T) { } } +func TestInitialPendingVolumesForPodAndGetVolumesInUse(t *testing.T) { + tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest") + if err != nil { + t.Fatalf("can't make a temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), secret.NewFakeManager(), configmap.NewFakeManager()) + + node, pod, pv, claim := createObjects() + claim.Status = v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimPending, + } + + kubeClient := fake.NewSimpleClientset(node, pod, pv, claim) + + manager := newTestVolumeManager(tmpDir, podManager, kubeClient) + + stopCh := runVolumeManager(manager) + defer close(stopCh) + + podManager.SetPods([]*v1.Pod{pod}) + + // Fake node status update + go simulateVolumeInUseUpdate( + v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name), + stopCh, + manager) + + // delayed claim binding + go delayClaimBecomesBound(kubeClient, claim.GetNamespace(), claim.ObjectMeta.Name) + + err = manager.WaitForAttachAndMount(pod) + if err != nil { + t.Errorf("Expected success: %v", err) + } + +} + func 
TestGetExtraSupplementalGroupsForPod(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest") if err != nil { @@ -279,6 +317,20 @@ func simulateVolumeInUseUpdate(volumeName v1.UniqueVolumeName, stopCh <-chan str } } +func delayClaimBecomesBound( + kubeClient clientset.Interface, + namespace, claimName string, +) { + time.Sleep(500 * time.Millisecond) + volumeClaim, _ := + kubeClient.Core().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) + volumeClaim.Status = v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimBound, + } + kubeClient.Core().PersistentVolumeClaims(namespace).Update(volumeClaim) + return +} + func runVolumeManager(manager VolumeManager) chan struct{} { stopCh := make(chan struct{}) //readyCh := make(chan bool, 1) diff --git a/pkg/master/BUILD b/pkg/master/BUILD index 77e7258ddfb..841ae34168d 100644 --- a/pkg/master/BUILD +++ b/pkg/master/BUILD @@ -43,7 +43,6 @@ go_library( "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", "//pkg/kubelet/client:go_default_library", - "//pkg/master/thirdparty:go_default_library", "//pkg/master/tunneler:go_default_library", "//pkg/registry/admissionregistration/rest:go_default_library", "//pkg/registry/apps/rest:go_default_library", @@ -166,8 +165,8 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//pkg/master/controller/crdregistration:all-srcs", "//pkg/master/ports:all-srcs", - "//pkg/master/thirdparty:all-srcs", "//pkg/master/tunneler:all-srcs", ], tags = ["automanaged"], diff --git a/pkg/master/OWNERS b/pkg/master/OWNERS index d02cb99f007..739cc9ee46a 100644 --- a/pkg/master/OWNERS +++ b/pkg/master/OWNERS @@ -18,7 +18,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - davidopp @@ -31,7 +30,7 @@ reviewers: - justinsb - roberthbailey - ncdc -- timstclair +- tallclair - mwielgus - timothysc - soltysh diff 
--git a/pkg/master/controller/crdregistration/BUILD b/pkg/master/controller/crdregistration/BUILD new file mode 100644 index 00000000000..b079131ba03 --- /dev/null +++ b/pkg/master/controller/crdregistration/BUILD @@ -0,0 +1,58 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = ["crdregistration_controller.go"], + tags = ["automanaged"], + deps = [ + "//pkg/controller:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/client-go/util/workqueue:go_default_library", + "//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +go_test( + name = "go_default_test", + srcs = ["crdregistration_controller_test.go"], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", + 
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library", + ], +) diff --git a/pkg/master/thirdparty/tprregistration_controller.go b/pkg/master/controller/crdregistration/crdregistration_controller.go similarity index 69% rename from pkg/master/thirdparty/tprregistration_controller.go rename to pkg/master/controller/crdregistration/crdregistration_controller.go index 6e0bcc3fac5..62b29114165 100644 --- a/pkg/master/thirdparty/tprregistration_controller.go +++ b/pkg/master/controller/crdregistration/crdregistration_controller.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package thirdparty +package crdregistration import ( "fmt" @@ -33,11 +33,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/kube-aggregator/pkg/apis/apiregistration" - "k8s.io/kubernetes/pkg/apis/extensions" - informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion" - listers "k8s.io/kubernetes/pkg/client/listers/extensions/internalversion" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata" ) // AutoAPIServiceRegistration is an interface which callers can re-declare locally and properly cast to for @@ -49,9 +45,7 @@ type AutoAPIServiceRegistration interface { RemoveAPIServiceToSync(name string) } -type tprRegistrationController struct { - tprLister listers.ThirdPartyResourceLister - tprSynced cache.InformerSynced +type crdRegistrationController struct { crdLister crdlisters.CustomResourceDefinitionLister crdSynced cache.InformerSynced @@ -68,44 
+62,15 @@ type tprRegistrationController struct { // controller so they automatically stay in sync. // In order to stay sane with both TPR and CRD present, we have a single controller that manages both. When choosing whether to have an // APIService, we simply iterate through both. -func NewAutoRegistrationController(tprInformer informers.ThirdPartyResourceInformer, crdinformer crdinformers.CustomResourceDefinitionInformer, apiServiceRegistration AutoAPIServiceRegistration) *tprRegistrationController { - c := &tprRegistrationController{ - tprLister: tprInformer.Lister(), - tprSynced: tprInformer.Informer().HasSynced, +func NewAutoRegistrationController(crdinformer crdinformers.CustomResourceDefinitionInformer, apiServiceRegistration AutoAPIServiceRegistration) *crdRegistrationController { + c := &crdRegistrationController{ crdLister: crdinformer.Lister(), crdSynced: crdinformer.Informer().HasSynced, apiServiceRegistration: apiServiceRegistration, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "tpr-autoregister"), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crd-autoregister"), } c.syncHandler = c.handleVersionUpdate - tprInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - cast := obj.(*extensions.ThirdPartyResource) - c.enqueueTPR(cast) - }, - UpdateFunc: func(_, obj interface{}) { - cast := obj.(*extensions.ThirdPartyResource) - c.enqueueTPR(cast) - }, - DeleteFunc: func(obj interface{}) { - cast, ok := obj.(*extensions.ThirdPartyResource) - if !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - glog.V(2).Infof("Couldn't get object from tombstone %#v", obj) - return - } - cast, ok = tombstone.Obj.(*extensions.ThirdPartyResource) - if !ok { - glog.V(2).Infof("Tombstone contained unexpected object: %#v", obj) - return - } - } - c.enqueueTPR(cast) - }, - }) - 
crdinformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { cast := obj.(*apiextensions.CustomResourceDefinition) @@ -136,16 +101,16 @@ func NewAutoRegistrationController(tprInformer informers.ThirdPartyResourceInfor return c } -func (c *tprRegistrationController) Run(threadiness int, stopCh <-chan struct{}) { +func (c *crdRegistrationController) Run(threadiness int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() // make sure the work queue is shutdown which will trigger workers to end defer c.queue.ShutDown() - glog.Infof("Starting tpr-autoregister controller") - defer glog.Infof("Shutting down tpr-autoregister controller") + glog.Infof("Starting crd-autoregister controller") + defer glog.Infof("Shutting down crd-autoregister controller") // wait for your secondary caches to fill before starting your work - if !controller.WaitForCacheSync("tpr-autoregister", stopCh, c.tprSynced) { + if !controller.WaitForCacheSync("crd-autoregister", stopCh, c.crdSynced) { return } @@ -160,7 +125,7 @@ func (c *tprRegistrationController) Run(threadiness int, stopCh <-chan struct{}) <-stopCh } -func (c *tprRegistrationController) runWorker() { +func (c *crdRegistrationController) runWorker() { // hot loop until we're told to stop. processNextWorkItem will automatically wait until there's work // available, so we don't worry about secondary waits for c.processNextWorkItem() { @@ -168,7 +133,7 @@ func (c *tprRegistrationController) runWorker() { } // processNextWorkItem deals with one key off the queue. It returns false when it's time to quit. -func (c *tprRegistrationController) processNextWorkItem() bool { +func (c *crdRegistrationController) processNextWorkItem() bool { // pull the next work item from queue. 
It should be a key we use to lookup something in a cache key, quit := c.queue.Get() if quit { @@ -198,43 +163,14 @@ func (c *tprRegistrationController) processNextWorkItem() bool { return true } -func (c *tprRegistrationController) enqueueTPR(tpr *extensions.ThirdPartyResource) { - _, group, err := thirdpartyresourcedata.ExtractApiGroupAndKind(tpr) - if err != nil { - utilruntime.HandleError(err) - return - } - for _, version := range tpr.Versions { - c.queue.Add(schema.GroupVersion{Group: group, Version: version.Name}) - } -} - -func (c *tprRegistrationController) enqueueCRD(crd *apiextensions.CustomResourceDefinition) { +func (c *crdRegistrationController) enqueueCRD(crd *apiextensions.CustomResourceDefinition) { c.queue.Add(schema.GroupVersion{Group: crd.Spec.Group, Version: crd.Spec.Version}) } -func (c *tprRegistrationController) handleVersionUpdate(groupVersion schema.GroupVersion) error { +func (c *crdRegistrationController) handleVersionUpdate(groupVersion schema.GroupVersion) error { found := false apiServiceName := groupVersion.Version + "." + groupVersion.Group - // check all TPRs. There shouldn't that many, but if we have problems later we can index them - tprs, err := c.tprLister.List(labels.Everything()) - if err != nil { - return err - } - for _, tpr := range tprs { - _, group, err := thirdpartyresourcedata.ExtractApiGroupAndKind(tpr) - if err != nil { - return err - } - for _, version := range tpr.Versions { - if version.Name == groupVersion.Version && group == groupVersion.Group { - found = true - break - } - } - } - // check all CRDs. 
There shouldn't that many, but if we have problems later we can index them crds, err := c.crdLister.List(labels.Everything()) if err != nil { diff --git a/pkg/master/thirdparty/tprregistration_controller_test.go b/pkg/master/controller/crdregistration/crdregistration_controller_test.go similarity index 58% rename from pkg/master/thirdparty/tprregistration_controller_test.go rename to pkg/master/controller/crdregistration/crdregistration_controller_test.go index 97c6e28ec2f..1e2d8df5879 100644 --- a/pkg/master/thirdparty/tprregistration_controller_test.go +++ b/pkg/master/controller/crdregistration/crdregistration_controller_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package thirdparty +package crdregistration import ( "reflect" @@ -25,87 +25,18 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" "k8s.io/kube-aggregator/pkg/apis/apiregistration" - "k8s.io/kubernetes/pkg/apis/extensions" - listers "k8s.io/kubernetes/pkg/client/listers/extensions/internalversion" ) -func TestEnqueue(t *testing.T) { - c := tprRegistrationController{ - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "tpr-autoregister"), - } - - tpr := &extensions.ThirdPartyResource{ - ObjectMeta: metav1.ObjectMeta{Name: "resource.group.example.com"}, - Versions: []extensions.APIVersion{ - {Name: "v1alpha1"}, - {Name: "v1"}, - }, - } - c.enqueueTPR(tpr) - - first, _ := c.queue.Get() - expectedFirst := schema.GroupVersion{Group: "group.example.com", Version: "v1alpha1"} - if first != expectedFirst { - t.Errorf("expected %v, got %v", expectedFirst, first) - } - - second, _ := c.queue.Get() - expectedSecond := schema.GroupVersion{Group: "group.example.com", Version: "v1"} - if second != expectedSecond { - t.Errorf("expected %v, got %v", expectedSecond, second) - } -} - func 
TestHandleVersionUpdate(t *testing.T) { tests := []struct { name string - startingTPRs []*extensions.ThirdPartyResource startingCRDs []*apiextensions.CustomResourceDefinition version schema.GroupVersion expectedAdded []*apiregistration.APIService expectedRemoved []string }{ - { - name: "simple add tpr", - startingTPRs: []*extensions.ThirdPartyResource{ - { - ObjectMeta: metav1.ObjectMeta{Name: "resource.group.com"}, - Versions: []extensions.APIVersion{ - {Name: "v1"}, - }, - }, - }, - version: schema.GroupVersion{Group: "group.com", Version: "v1"}, - - expectedAdded: []*apiregistration.APIService{ - { - ObjectMeta: metav1.ObjectMeta{Name: "v1.group.com"}, - Spec: apiregistration.APIServiceSpec{ - Group: "group.com", - Version: "v1", - GroupPriorityMinimum: 1000, - VersionPriority: 100, - }, - }, - }, - }, - { - name: "simple remove tpr", - startingTPRs: []*extensions.ThirdPartyResource{ - { - ObjectMeta: metav1.ObjectMeta{Name: "resource.group.com"}, - Versions: []extensions.APIVersion{ - {Name: "v1"}, - }, - }, - }, - version: schema.GroupVersion{Group: "group.com", Version: "v2"}, - - expectedRemoved: []string{"v2.group.com"}, - }, { name: "simple add crd", startingCRDs: []*apiextensions.CustomResourceDefinition{ @@ -148,18 +79,12 @@ func TestHandleVersionUpdate(t *testing.T) { for _, test := range tests { registration := &fakeAPIServiceRegistration{} - tprCache := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - tprLister := listers.NewThirdPartyResourceLister(tprCache) crdCache := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) crdLister := crdlisters.NewCustomResourceDefinitionLister(crdCache) - c := tprRegistrationController{ - tprLister: tprLister, + c := crdRegistrationController{ crdLister: crdLister, apiServiceRegistration: registration, } - for i := range test.startingTPRs { - 
tprCache.Add(test.startingTPRs[i]) - } for i := range test.startingCRDs { crdCache.Add(test.startingCRDs[i]) } diff --git a/pkg/master/master.go b/pkg/master/master.go index d3fbd5cf9a5..1b4a6a18b64 100644 --- a/pkg/master/master.go +++ b/pkg/master/master.go @@ -45,7 +45,6 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apiserver/pkg/endpoints/discovery" "k8s.io/apiserver/pkg/registry/generic" - genericregistry "k8s.io/apiserver/pkg/registry/generic" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/healthz" serverstorage "k8s.io/apiserver/pkg/server/storage" @@ -54,7 +53,6 @@ import ( corev1client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" kubeletclient "k8s.io/kubernetes/pkg/kubelet/client" - "k8s.io/kubernetes/pkg/master/thirdparty" "k8s.io/kubernetes/pkg/master/tunneler" "k8s.io/kubernetes/pkg/routes" nodeutil "k8s.io/kubernetes/pkg/util/node" @@ -210,7 +208,7 @@ func (c *Config) SkipComplete() completedConfig { // Certain config fields will be set to a default value if unset. // Certain config fields must be specified, including: // KubeletClientConfig -func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget, crdRESTOptionsGetter genericregistry.RESTOptionsGetter) (*Master, error) { +func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) (*Master, error) { if reflect.DeepEqual(c.KubeletClientConfig, kubeletclient.KubeletClientConfig{}) { return nil, fmt.Errorf("Master.New() called with empty config.KubeletClientConfig") } @@ -258,8 +256,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget, autoscalingrest.RESTStorageProvider{}, batchrest.RESTStorageProvider{}, certificatesrest.RESTStorageProvider{}, - // TODO(enisoc): Remove crdRESTOptionsGetter input argument when TPR code is removed. 
- extensionsrest.RESTStorageProvider{ResourceInterface: thirdparty.NewThirdPartyResourceServer(s, s.DiscoveryGroupManager, c.StorageFactory, crdRESTOptionsGetter)}, + extensionsrest.RESTStorageProvider{}, networkingrest.RESTStorageProvider{}, policyrest.RESTStorageProvider{}, rbacrest.RESTStorageProvider{Authorizer: c.GenericConfig.Authorizer}, @@ -412,7 +409,6 @@ func DefaultAPIResourceConfigSource() *serverstorage.ResourceConfig { extensionsapiv1beta1.SchemeGroupVersion.WithResource("ingresses"), extensionsapiv1beta1.SchemeGroupVersion.WithResource("networkpolicies"), extensionsapiv1beta1.SchemeGroupVersion.WithResource("replicasets"), - extensionsapiv1beta1.SchemeGroupVersion.WithResource("thirdpartyresources"), extensionsapiv1beta1.SchemeGroupVersion.WithResource("podsecuritypolicies"), ) diff --git a/pkg/master/master_openapi_test.go b/pkg/master/master_openapi_test.go index 7e9f0c78635..4629e4159eb 100644 --- a/pkg/master/master_openapi_test.go +++ b/pkg/master/master_openapi_test.go @@ -54,7 +54,7 @@ func TestValidOpenAPISpec(t *testing.T) { } config.GenericConfig.SwaggerConfig = genericapiserver.DefaultSwaggerConfig() - master, err := config.Complete().New(genericapiserver.EmptyDelegate, nil) + master, err := config.Complete().New(genericapiserver.EmptyDelegate) if err != nil { t.Fatalf("Error in bringing up the master: %v", err) } diff --git a/pkg/master/master_test.go b/pkg/master/master_test.go index e69bd2701e9..05c800372f2 100644 --- a/pkg/master/master_test.go +++ b/pkg/master/master_test.go @@ -115,7 +115,7 @@ func setUp(t *testing.T) (*etcdtesting.EtcdTestServer, Config, *assert.Assertion func newMaster(t *testing.T) (*Master, *etcdtesting.EtcdTestServer, Config, *assert.Assertions) { etcdserver, config, assert := setUp(t) - master, err := config.Complete().New(genericapiserver.EmptyDelegate, nil) + master, err := config.Complete().New(genericapiserver.EmptyDelegate) if err != nil { t.Fatalf("Error in bringing up the master: %v", err) } @@ -141,7 
+141,7 @@ func limitedAPIResourceConfigSource() *serverstorage.ResourceConfig { func newLimitedMaster(t *testing.T) (*Master, *etcdtesting.EtcdTestServer, Config, *assert.Assertions) { etcdserver, config, assert := setUp(t) config.APIResourceConfigSource = limitedAPIResourceConfigSource() - master, err := config.Complete().New(genericapiserver.EmptyDelegate, nil) + master, err := config.Complete().New(genericapiserver.EmptyDelegate) if err != nil { t.Fatalf("Error in bringing up the master: %v", err) } diff --git a/pkg/master/thirdparty/BUILD b/pkg/master/thirdparty/BUILD deleted file mode 100644 index 2e2a14bd5ff..00000000000 --- a/pkg/master/thirdparty/BUILD +++ /dev/null @@ -1,88 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "thirdparty.go", - "tprregistration_controller.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/client/informers/informers_generated/internalversion/extensions/internalversion:go_default_library", - "//pkg/client/listers/extensions/internalversion:go_default_library", - "//pkg/controller:go_default_library", - "//pkg/registry/extensions/rest:go_default_library", - "//pkg/registry/extensions/thirdpartyresourcedata:go_default_library", - "//pkg/registry/extensions/thirdpartyresourcedata/storage:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", - "//vendor/k8s.io/apiextensions-apiserver/pkg/apiserver:go_default_library", - "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion:go_default_library", - 
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion:go_default_library", - "//vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion:go_default_library", - "//vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/discovery:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", - "//vendor/k8s.io/apiserver/pkg/server:go_default_library", - "//vendor/k8s.io/apiserver/pkg/server/storage:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", - "//vendor/k8s.io/client-go/discovery:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - 
-filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["tprregistration_controller_test.go"], - library = ":go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/apis/extensions:go_default_library", - "//pkg/client/listers/extensions/internalversion:go_default_library", - "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", - "//vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library", - ], -) diff --git a/pkg/master/thirdparty/thirdparty.go b/pkg/master/thirdparty/thirdparty.go deleted file mode 100644 index 8e00569f996..00000000000 --- a/pkg/master/thirdparty/thirdparty.go +++ /dev/null @@ -1,462 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package thirdparty - -import ( - "fmt" - "strings" - "sync" - - "github.com/golang/glog" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" - apiextensionsserver "k8s.io/apiextensions-apiserver/pkg/apiserver" - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion" - "k8s.io/apiextensions-apiserver/pkg/registry/customresource" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/json" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - genericapi "k8s.io/apiserver/pkg/endpoints" - "k8s.io/apiserver/pkg/endpoints/discovery" - "k8s.io/apiserver/pkg/endpoints/request" - genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - "k8s.io/apiserver/pkg/registry/generic" - "k8s.io/apiserver/pkg/registry/rest" - genericapiserver "k8s.io/apiserver/pkg/server" - serverstorgage "k8s.io/apiserver/pkg/server/storage" - "k8s.io/apiserver/pkg/storage/storagebackend" - discoveryclient "k8s.io/client-go/discovery" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - extensionsrest "k8s.io/kubernetes/pkg/registry/extensions/rest" - "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata" - thirdpartyresourcedatastore "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/storage" -) - -// dynamicLister is used to list resources for dynamic third party -// apis. 
It implements the genericapihandlers.APIResourceLister interface -type dynamicLister struct { - m *ThirdPartyResourceServer - path string -} - -func (d dynamicLister) ListAPIResources() []metav1.APIResource { - return d.m.getExistingThirdPartyResources(d.path) -} - -var _ discovery.APIResourceLister = &dynamicLister{} - -type ThirdPartyResourceServer struct { - genericAPIServer *genericapiserver.GenericAPIServer - - availableGroupManager discovery.GroupManager - - deleteCollectionWorkers int - - // storage for third party objects - thirdPartyStorageConfig *storagebackend.Config - // map from api path to a tuple of (storage for the objects, APIGroup) - thirdPartyResources map[string]*thirdPartyEntry - // protects the map - thirdPartyResourcesLock sync.RWMutex - - // Useful for reliable testing. Shouldn't be used otherwise. - disableThirdPartyControllerForTesting bool - - crdRESTOptionsGetter generic.RESTOptionsGetter -} - -func NewThirdPartyResourceServer(genericAPIServer *genericapiserver.GenericAPIServer, availableGroupManager discovery.GroupManager, storageFactory serverstorgage.StorageFactory, crdRESTOptionsGetter generic.RESTOptionsGetter) *ThirdPartyResourceServer { - ret := &ThirdPartyResourceServer{ - genericAPIServer: genericAPIServer, - thirdPartyResources: map[string]*thirdPartyEntry{}, - availableGroupManager: availableGroupManager, - crdRESTOptionsGetter: crdRESTOptionsGetter, - } - - var err error - ret.thirdPartyStorageConfig, err = storageFactory.NewConfig(extensions.Resource("thirdpartyresources")) - if err != nil { - glog.Fatalf("Error building third party storage: %v", err) - } - - return ret -} - -// thirdPartyEntry combines objects storage and API group into one struct -// for easy lookup. -type thirdPartyEntry struct { - // Map from plural resource name to entry - storage map[string]*thirdpartyresourcedatastore.REST - group metav1.APIGroup -} - -// HasThirdPartyResource returns true if a particular third party resource currently installed. 
-func (m *ThirdPartyResourceServer) HasThirdPartyResource(rsrc *extensions.ThirdPartyResource) (bool, error) { - kind, group, err := thirdpartyresourcedata.ExtractApiGroupAndKind(rsrc) - if err != nil { - return false, err - } - path := extensionsrest.MakeThirdPartyPath(group) - m.thirdPartyResourcesLock.Lock() - defer m.thirdPartyResourcesLock.Unlock() - entry := m.thirdPartyResources[path] - if entry == nil { - return false, nil - } - plural, _ := meta.UnsafeGuessKindToResource(schema.GroupVersionKind{ - Group: group, - Version: rsrc.Versions[0].Name, - Kind: kind, - }) - _, found := entry.storage[plural.Resource] - return found, nil -} - -func (m *ThirdPartyResourceServer) removeThirdPartyStorage(path, resource string) error { - m.thirdPartyResourcesLock.Lock() - defer m.thirdPartyResourcesLock.Unlock() - entry, found := m.thirdPartyResources[path] - if !found { - return nil - } - storage, found := entry.storage[resource] - if !found { - return nil - } - if err := m.removeThirdPartyResourceData(&entry.group, resource, storage); err != nil { - return err - } - delete(entry.storage, resource) - if len(entry.storage) == 0 { - delete(m.thirdPartyResources, path) - m.availableGroupManager.RemoveGroup(extensionsrest.GetThirdPartyGroupName(path)) - } else { - m.thirdPartyResources[path] = entry - } - return nil -} - -// RemoveThirdPartyResource removes all resources matching `path`. 
Also deletes any stored data -func (m *ThirdPartyResourceServer) RemoveThirdPartyResource(path string) error { - ix := strings.LastIndex(path, "/") - if ix == -1 { - return fmt.Errorf("expected /, saw: %s", path) - } - resource := path[ix+1:] - path = path[0:ix] - - if err := m.removeThirdPartyStorage(path, resource); err != nil { - return err - } - - services := m.genericAPIServer.Handler.GoRestfulContainer.RegisteredWebServices() - for ix := range services { - root := services[ix].RootPath() - if root == path || strings.HasPrefix(root, path+"/") { - m.genericAPIServer.Handler.GoRestfulContainer.Remove(services[ix]) - } - } - return nil -} - -func (m *ThirdPartyResourceServer) removeThirdPartyResourceData(group *metav1.APIGroup, resource string, registry *thirdpartyresourcedatastore.REST) error { - // Freeze TPR data to prevent new writes via this apiserver process. - // Other apiservers can still write. This is best-effort because there - // are worse problems with TPR data than the possibility of going back - // in time when migrating to CRD [citation needed]. - registry.Freeze() - - ctx := genericapirequest.NewContext() - existingData, err := registry.List(ctx, nil) - if err != nil { - return err - } - list, ok := existingData.(*extensions.ThirdPartyResourceDataList) - if !ok { - return fmt.Errorf("expected a *ThirdPartyResourceDataList, got %T", existingData) - } - - // Migrate TPR data to CRD if requested. - gvk := schema.GroupVersionKind{Group: group.Name, Version: group.PreferredVersion.Version, Kind: registry.Kind()} - migrationRequested, err := m.migrateThirdPartyResourceData(gvk, resource, list) - if err != nil { - // Migration is best-effort. Log and continue. - utilruntime.HandleError(fmt.Errorf("failed to migrate TPR data: %v", err)) - } - - // Skip deletion of TPR data if migration was requested (whether or not it succeeded). - // This leaves the etcd data around for rollback, and to avoid sending DELETE watch events. 
- if migrationRequested { - return nil - } - - for i := range list.Items { - item := &list.Items[i] - - // Use registry.Store.Delete() to bypass the frozen registry.Delete(). - if _, _, err := registry.Store.Delete(genericapirequest.WithNamespace(ctx, item.Namespace), item.Name, nil); err != nil { - return err - } - } - return nil -} - -func (m *ThirdPartyResourceServer) findMatchingCRD(gvk schema.GroupVersionKind, resource string) (*apiextensions.CustomResourceDefinition, error) { - // CustomResourceDefinitionList does not implement the protobuf marshalling interface. - config := *m.genericAPIServer.LoopbackClientConfig - config.ContentType = "application/json" - crdClient, err := apiextensionsclient.NewForConfig(&config) - if err != nil { - return nil, fmt.Errorf("can't create apiextensions client: %v", err) - } - crdList, err := crdClient.CustomResourceDefinitions().List(metav1.ListOptions{}) - if err != nil { - return nil, fmt.Errorf("can't list CustomResourceDefinitions: %v", err) - } - for i := range crdList.Items { - item := &crdList.Items[i] - if item.Spec.Scope == apiextensions.NamespaceScoped && - item.Spec.Group == gvk.Group && item.Spec.Version == gvk.Version && - item.Status.AcceptedNames.Kind == gvk.Kind && item.Status.AcceptedNames.Plural == resource { - return item, nil - } - } - return nil, nil -} - -func (m *ThirdPartyResourceServer) migrateThirdPartyResourceData(gvk schema.GroupVersionKind, resource string, dataList *extensions.ThirdPartyResourceDataList) (bool, error) { - // A matching CustomResourceDefinition implies migration is requested. - crd, err := m.findMatchingCRD(gvk, resource) - if err != nil { - return false, fmt.Errorf("can't determine if TPR should migrate: %v", err) - } - if crd == nil { - // No migration requested. - return false, nil - } - - // Talk directly to CustomResource storage. - // We have to bypass the API server because TPR is shadowing CRD at this point. 
- storage := customresource.NewREST( - schema.GroupResource{Group: crd.Spec.Group, Resource: crd.Spec.Names.Plural}, - schema.GroupVersionKind{Group: crd.Spec.Group, Version: crd.Spec.Version, Kind: crd.Spec.Names.ListKind}, - apiextensionsserver.UnstructuredCopier{}, - customresource.NewStrategy(discoveryclient.NewUnstructuredObjectTyper(nil), true, gvk), - m.crdRESTOptionsGetter, - ) - - // Copy TPR data to CustomResource. - var errs []error - ctx := request.NewContext() - for i := range dataList.Items { - item := &dataList.Items[i] - - // Convert TPR data to Unstructured. - objMap := make(map[string]interface{}) - if err := json.Unmarshal(item.Data, &objMap); err != nil { - errs = append(errs, fmt.Errorf("can't unmarshal TPR data %q: %v", item.Name, err)) - continue - } - - // Convert metadata to Unstructured and merge with data. - // cf. thirdpartyresourcedata.encodeToJSON() - metaMap := make(map[string]interface{}) - buf, err := json.Marshal(&item.ObjectMeta) - if err != nil { - errs = append(errs, fmt.Errorf("can't marshal metadata for TPR data %q: %v", item.Name, err)) - continue - } - if err := json.Unmarshal(buf, &metaMap); err != nil { - errs = append(errs, fmt.Errorf("can't unmarshal TPR data %q: %v", item.Name, err)) - continue - } - // resourceVersion cannot be set when creating objects. - delete(metaMap, "resourceVersion") - objMap["metadata"] = metaMap - - // Store CustomResource. 
- obj := &unstructured.Unstructured{Object: objMap} - createCtx := request.WithNamespace(ctx, obj.GetNamespace()) - if _, err := storage.Create(createCtx, obj, false); err != nil { - errs = append(errs, fmt.Errorf("can't create CustomResource for TPR data %q: %v", item.Name, err)) - continue - } - } - return true, utilerrors.NewAggregate(errs) -} - -// ListThirdPartyResources lists all currently installed third party resources -// The format is / -func (m *ThirdPartyResourceServer) ListThirdPartyResources() []string { - m.thirdPartyResourcesLock.RLock() - defer m.thirdPartyResourcesLock.RUnlock() - result := []string{} - for key := range m.thirdPartyResources { - for rsrc := range m.thirdPartyResources[key].storage { - result = append(result, key+"/"+rsrc) - } - } - return result -} - -func (m *ThirdPartyResourceServer) getExistingThirdPartyResources(path string) []metav1.APIResource { - result := []metav1.APIResource{} - m.thirdPartyResourcesLock.Lock() - defer m.thirdPartyResourcesLock.Unlock() - entry := m.thirdPartyResources[path] - if entry != nil { - for key, obj := range entry.storage { - result = append(result, metav1.APIResource{ - Name: key, - Namespaced: true, - Kind: obj.Kind(), - Verbs: metav1.Verbs([]string{ - "delete", "deletecollection", "get", "list", "patch", "create", "update", "watch", - }), - }) - } - } - return result -} - -func (m *ThirdPartyResourceServer) hasThirdPartyGroupStorage(path string) bool { - m.thirdPartyResourcesLock.Lock() - defer m.thirdPartyResourcesLock.Unlock() - _, found := m.thirdPartyResources[path] - return found -} - -func (m *ThirdPartyResourceServer) addThirdPartyResourceStorage(path, resource string, storage *thirdpartyresourcedatastore.REST, apiGroup metav1.APIGroup) { - m.thirdPartyResourcesLock.Lock() - defer m.thirdPartyResourcesLock.Unlock() - entry, found := m.thirdPartyResources[path] - if entry == nil { - entry = &thirdPartyEntry{ - group: apiGroup, - storage: map[string]*thirdpartyresourcedatastore.REST{}, - 
} - m.thirdPartyResources[path] = entry - } - entry.storage[resource] = storage - if !found { - m.availableGroupManager.AddGroup(apiGroup) - } -} - -// InstallThirdPartyResource installs a third party resource specified by 'rsrc'. When a resource is -// installed a corresponding RESTful resource is added as a valid path in the web service provided by -// the master. -// -// For example, if you install a resource ThirdPartyResource{ Name: "foo.company.com", Versions: {"v1"} } -// then the following RESTful resource is created on the server: -// http:///apis/company.com/v1/foos/... -func (m *ThirdPartyResourceServer) InstallThirdPartyResource(rsrc *extensions.ThirdPartyResource) error { - kind, group, err := thirdpartyresourcedata.ExtractApiGroupAndKind(rsrc) - if err != nil { - return err - } - if len(rsrc.Versions) == 0 { - return fmt.Errorf("ThirdPartyResource %s has no defined versions", rsrc.Name) - } - plural, _ := meta.UnsafeGuessKindToResource(schema.GroupVersionKind{ - Group: group, - Version: rsrc.Versions[0].Name, - Kind: kind, - }) - path := extensionsrest.MakeThirdPartyPath(group) - - groupVersion := metav1.GroupVersionForDiscovery{ - GroupVersion: group + "/" + rsrc.Versions[0].Name, - Version: rsrc.Versions[0].Name, - } - apiGroup := metav1.APIGroup{ - Name: group, - Versions: []metav1.GroupVersionForDiscovery{groupVersion}, - PreferredVersion: groupVersion, - } - - thirdparty := m.thirdpartyapi(group, kind, rsrc.Versions[0].Name, plural.Resource) - - // If storage exists, this group has already been added, just update - // the group with the new API - if m.hasThirdPartyGroupStorage(path) { - m.addThirdPartyResourceStorage(path, plural.Resource, thirdparty.Storage[plural.Resource].(*thirdpartyresourcedatastore.REST), apiGroup) - return thirdparty.UpdateREST(m.genericAPIServer.Handler.GoRestfulContainer) - } - - if err := thirdparty.InstallREST(m.genericAPIServer.Handler.GoRestfulContainer); err != nil { - glog.Errorf("Unable to setup thirdparty api: 
%v", err) - } - m.genericAPIServer.Handler.GoRestfulContainer.Add(discovery.NewAPIGroupHandler(api.Codecs, apiGroup, m.genericAPIServer.RequestContextMapper()).WebService()) - - m.addThirdPartyResourceStorage(path, plural.Resource, thirdparty.Storage[plural.Resource].(*thirdpartyresourcedatastore.REST), apiGroup) - api.Registry.AddThirdPartyAPIGroupVersions(schema.GroupVersion{Group: group, Version: rsrc.Versions[0].Name}) - return nil -} - -func (m *ThirdPartyResourceServer) thirdpartyapi(group, kind, version, pluralResource string) *genericapi.APIGroupVersion { - resourceStorage := thirdpartyresourcedatastore.NewREST( - generic.RESTOptions{ - StorageConfig: m.thirdPartyStorageConfig, - Decorator: generic.UndecoratedStorage, - DeleteCollectionWorkers: m.deleteCollectionWorkers, - }, - group, - kind, - ) - - storage := map[string]rest.Storage{ - pluralResource: resourceStorage, - } - - optionsExternalVersion := api.Registry.GroupOrDie(api.GroupName).GroupVersion - internalVersion := schema.GroupVersion{Group: group, Version: runtime.APIVersionInternal} - externalVersion := schema.GroupVersion{Group: group, Version: version} - - apiRoot := extensionsrest.MakeThirdPartyPath("") - return &genericapi.APIGroupVersion{ - Root: apiRoot, - GroupVersion: externalVersion, - - Creater: thirdpartyresourcedata.NewObjectCreator(group, version, api.Scheme), - Convertor: api.Scheme, - Copier: api.Scheme, - Defaulter: api.Scheme, - Typer: api.Scheme, - UnsafeConvertor: api.Scheme, - - Mapper: thirdpartyresourcedata.NewMapper(api.Registry.GroupOrDie(extensions.GroupName).RESTMapper, kind, version, group), - Linker: api.Registry.GroupOrDie(extensions.GroupName).SelfLinker, - Storage: storage, - OptionsExternalVersion: &optionsExternalVersion, - - Serializer: thirdpartyresourcedata.NewNegotiatedSerializer(api.Codecs, kind, externalVersion, internalVersion), - ParameterCodec: thirdpartyresourcedata.NewThirdPartyParameterCodec(api.ParameterCodec), - - Context: 
m.genericAPIServer.RequestContextMapper(), - - MinRequestTimeout: m.genericAPIServer.MinRequestTimeout(), - - ResourceLister: dynamicLister{m, extensionsrest.MakeThirdPartyPath(group)}, - } -} diff --git a/pkg/printers/BUILD b/pkg/printers/BUILD index e305750db9c..cc22aa2c112 100644 --- a/pkg/printers/BUILD +++ b/pkg/printers/BUILD @@ -25,8 +25,6 @@ go_library( ], tags = ["automanaged"], deps = [ - "//pkg/util/slice:go_default_library", - "//vendor/github.com/fatih/camelcase:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -69,11 +67,3 @@ filegroup( ], tags = ["automanaged"], ) - -go_test( - name = "go_default_test", - srcs = ["humanreadable_test.go"], - library = ":go_default_library", - tags = ["automanaged"], - deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library"], -) diff --git a/pkg/printers/humanreadable.go b/pkg/printers/humanreadable.go index 59bcf7e76ea..a4b1945746c 100644 --- a/pkg/printers/humanreadable.go +++ b/pkg/printers/humanreadable.go @@ -21,12 +21,9 @@ import ( "fmt" "io" "reflect" - "sort" "strings" "text/tabwriter" - "github.com/fatih/camelcase" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1alpha1 "k8s.io/apimachinery/pkg/apis/meta/v1alpha1" @@ -34,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/kubernetes/pkg/util/slice" ) type TablePrinter interface { @@ -44,6 +40,7 @@ type TablePrinter interface { type PrintHandler interface { Handler(columns, columnsWithWide []string, printFunc interface{}) error TableHandler(columns []metav1alpha1.TableColumnDefinition, printFunc interface{}) error + DefaultTableHandler(columns []metav1alpha1.TableColumnDefinition, printFunc interface{}) error } var 
withNamespacePrefixColumns = []string{"NAMESPACE"} // TODO(erictune): print cluster name too. @@ -60,12 +57,13 @@ type handlerEntry struct { // will only be printed if the object type changes. This makes it useful for printing items // received from watches. type HumanReadablePrinter struct { - handlerMap map[reflect.Type]*handlerEntry - options PrintOptions - lastType reflect.Type - skipTabWriter bool - encoder runtime.Encoder - decoder runtime.Decoder + handlerMap map[reflect.Type]*handlerEntry + defaultHandler *handlerEntry + options PrintOptions + lastType interface{} + skipTabWriter bool + encoder runtime.Encoder + decoder runtime.Decoder } var _ PrintHandler = &HumanReadablePrinter{} @@ -188,6 +186,25 @@ func (h *HumanReadablePrinter) TableHandler(columnDefinitions []metav1alpha1.Tab return nil } +// DefaultTableHandler registers a set of columns and a print func that is given a chance to process +// any object without an explicit handler. Only the most recently set print handler is used. +// See ValidateRowPrintHandlerFunc for required method signature. +func (h *HumanReadablePrinter) DefaultTableHandler(columnDefinitions []metav1alpha1.TableColumnDefinition, printFunc interface{}) error { + printFuncValue := reflect.ValueOf(printFunc) + if err := ValidateRowPrintHandlerFunc(printFuncValue); err != nil { + utilruntime.HandleError(fmt.Errorf("unable to register print function: %v", err)) + return err + } + entry := &handlerEntry{ + columnDefinitions: columnDefinitions, + printRows: true, + printFunc: printFuncValue, + } + + h.defaultHandler = entry + return nil +} + // ValidateRowPrintHandlerFunc validates print handler signature. // printFunc is the function that will be called to print an object. 
// It must be of the following type: @@ -266,7 +283,7 @@ func (h *HumanReadablePrinter) unknown(data []byte, w io.Writer) error { return err } -func (h *HumanReadablePrinter) printHeader(columnNames []string, w io.Writer) error { +func printHeader(columnNames []string, w io.Writer) error { if _, err := fmt.Fprintf(w, "%s\n", strings.Join(columnNames, "\t")); err != nil { return err } @@ -299,141 +316,24 @@ func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) er obj, _ = decodeUnknownObject(obj, h.encoder, h.decoder) } + // print with a registered handler t := reflect.TypeOf(obj) if handler := h.handlerMap[t]; handler != nil { - if !h.options.NoHeaders && t != h.lastType { - var headers []string - for _, column := range handler.columnDefinitions { - if column.Priority != 0 && !h.options.Wide { - continue - } - headers = append(headers, strings.ToUpper(column.Name)) - } - headers = append(headers, formatLabelHeaders(h.options.ColumnLabels)...) - // LABELS is always the last column. - headers = append(headers, formatShowLabelsHeader(h.options.ShowLabels, t)...) - if h.options.WithNamespace { - headers = append(withNamespacePrefixColumns, headers...) 
- } - h.printHeader(headers, output) - h.lastType = t - } - - if handler.printRows { - args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(h.options)} - results := handler.printFunc.Call(args) - if results[1].IsNil() { - rows := results[0].Interface().([]metav1alpha1.TableRow) - for _, row := range rows { - - if h.options.WithNamespace { - if obj := row.Object.Object; obj != nil { - if m, err := meta.Accessor(obj); err == nil { - fmt.Fprint(output, m.GetNamespace()) - } - } - fmt.Fprint(output, "\t") - } - - for i, cell := range row.Cells { - if i != 0 { - fmt.Fprint(output, "\t") - } else { - // TODO: remove this once we drop the legacy printers - if h.options.WithKind && len(h.options.Kind) > 0 { - fmt.Fprintf(output, "%s/%s", h.options.Kind, cell) - continue - } - } - fmt.Fprint(output, cell) - } - - hasLabels := len(h.options.ColumnLabels) > 0 - if obj := row.Object.Object; obj != nil && (hasLabels || h.options.ShowLabels) { - if m, err := meta.Accessor(obj); err == nil { - for _, value := range labelValues(m.GetLabels(), h.options) { - output.Write([]byte("\t")) - output.Write([]byte(value)) - } - } - } - - output.Write([]byte("\n")) - } - return nil - } - return results[1].Interface().(error) - } - - // TODO: this code path is deprecated and will be removed when all handlers are row printers - args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(output), reflect.ValueOf(h.options)} - resultValue := handler.printFunc.Call(args)[0] - if resultValue.IsNil() { - return nil - } - return resultValue.Interface().(error) - } - - if _, err := meta.Accessor(obj); err == nil { - // we don't recognize this type, but we can still attempt to print some reasonable information about. 
- unstructured, ok := obj.(runtime.Unstructured) - if !ok { - return fmt.Errorf("error: unknown type %T, expected unstructured in %#v", obj, h.handlerMap) - } - - content := unstructured.UnstructuredContent() - - // we'll elect a few more fields to print depending on how much columns are already taken - maxDiscoveredFieldsToPrint := 3 - maxDiscoveredFieldsToPrint = maxDiscoveredFieldsToPrint - len(h.options.ColumnLabels) - if h.options.WithNamespace { // where's my ternary - maxDiscoveredFieldsToPrint-- - } - if h.options.ShowLabels { - maxDiscoveredFieldsToPrint-- - } - if maxDiscoveredFieldsToPrint < 0 { - maxDiscoveredFieldsToPrint = 0 - } - - var discoveredFieldNames []string // we want it predictable so this will be used to sort - ignoreIfDiscovered := []string{"kind", "apiVersion"} // these are already covered - for field, value := range content { - if slice.ContainsString(ignoreIfDiscovered, field, nil) { - continue - } - switch value.(type) { - case map[string]interface{}: - // just simpler types - continue - } - discoveredFieldNames = append(discoveredFieldNames, field) - } - sort.Strings(discoveredFieldNames) - if len(discoveredFieldNames) > maxDiscoveredFieldsToPrint { - discoveredFieldNames = discoveredFieldNames[:maxDiscoveredFieldsToPrint] - } - - if !h.options.NoHeaders && t != h.lastType { - headers := []string{"NAME", "KIND"} - for _, discoveredField := range discoveredFieldNames { - fieldAsHeader := strings.ToUpper(strings.Join(camelcase.Split(discoveredField), " ")) - headers = append(headers, fieldAsHeader) - } - headers = append(headers, formatLabelHeaders(h.options.ColumnLabels)...) - // LABELS is always the last column. - headers = append(headers, formatShowLabelsHeader(h.options.ShowLabels, t)...) - if h.options.WithNamespace { - headers = append(withNamespacePrefixColumns, headers...) 
- } - h.printHeader(headers, output) - h.lastType = t - } - - // if the error isn't nil, report the "I don't recognize this" error - if err := printUnstructured(unstructured, output, discoveredFieldNames, h.options); err != nil { + includeHeaders := h.lastType != t && !h.options.NoHeaders + if err := printRowsForHandlerEntry(output, handler, obj, h.options, includeHeaders); err != nil { return err } + h.lastType = t + return nil + } + + // print with the default handler if set, and use the columns from the last time + if h.defaultHandler != nil { + includeHeaders := h.lastType != h.defaultHandler && !h.options.NoHeaders + if err := printRowsForHandlerEntry(output, h.defaultHandler, obj, h.options, includeHeaders); err != nil { + return err + } + h.lastType = h.defaultHandler return nil } @@ -631,6 +531,87 @@ func (h *HumanReadablePrinter) PrintTable(obj runtime.Object, options PrintOptio return table, nil } +// printRowsForHandlerEntry prints the incremental table output (headers if the current type is +// different from lastType) including all the rows in the object. It returns the current type +// or an error, if any. +func printRowsForHandlerEntry(output io.Writer, handler *handlerEntry, obj runtime.Object, options PrintOptions, includeHeaders bool) error { + if includeHeaders { + var headers []string + for _, column := range handler.columnDefinitions { + if column.Priority != 0 && !options.Wide { + continue + } + headers = append(headers, strings.ToUpper(column.Name)) + } + headers = append(headers, formatLabelHeaders(options.ColumnLabels)...) + // LABELS is always the last column. + headers = append(headers, formatShowLabelsHeader(options.ShowLabels)...) + if options.WithNamespace { + headers = append(withNamespacePrefixColumns, headers...) 
+ } + printHeader(headers, output) + } + + if !handler.printRows { + // TODO: this code path is deprecated and will be removed when all handlers are row printers + args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(output), reflect.ValueOf(options)} + resultValue := handler.printFunc.Call(args)[0] + if resultValue.IsNil() { + return nil + } + return resultValue.Interface().(error) + } + + args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(options)} + results := handler.printFunc.Call(args) + if results[1].IsNil() { + rows := results[0].Interface().([]metav1alpha1.TableRow) + printRows(output, rows, options) + return nil + } + return results[1].Interface().(error) + +} + +// printRows writes the provided rows to output. +func printRows(output io.Writer, rows []metav1alpha1.TableRow, options PrintOptions) { + for _, row := range rows { + if options.WithNamespace { + if obj := row.Object.Object; obj != nil { + if m, err := meta.Accessor(obj); err == nil { + fmt.Fprint(output, m.GetNamespace()) + } + } + fmt.Fprint(output, "\t") + } + + for i, cell := range row.Cells { + if i != 0 { + fmt.Fprint(output, "\t") + } else { + // TODO: remove this once we drop the legacy printers + if options.WithKind && len(options.Kind) > 0 { + fmt.Fprintf(output, "%s/%s", options.Kind, cell) + continue + } + } + fmt.Fprint(output, cell) + } + + hasLabels := len(options.ColumnLabels) > 0 + if obj := row.Object.Object; obj != nil && (hasLabels || options.ShowLabels) { + if m, err := meta.Accessor(obj); err == nil { + for _, value := range labelValues(m.GetLabels(), options) { + output.Write([]byte("\t")) + output.Write([]byte(value)) + } + } + } + + output.Write([]byte("\n")) + } +} + // legacyPrinterToTable uses the old printFunc with tabbed writer to generate a table. // TODO: remove when all legacy printers are removed. 
func (h *HumanReadablePrinter) legacyPrinterToTable(obj runtime.Object, handler *handlerEntry) (*metav1alpha1.Table, error) { @@ -754,12 +735,9 @@ func formatLabelHeaders(columnLabels []string) []string { } // headers for --show-labels=true -func formatShowLabelsHeader(showLabels bool, t reflect.Type) []string { +func formatShowLabelsHeader(showLabels bool) []string { if showLabels { - // TODO: this is all sorts of hack, fix - if t.String() != "*api.ThirdPartyResource" && t.String() != "*api.ThirdPartyResourceList" { - return []string{"LABELS"} - } + return []string{"LABELS"} } return nil } diff --git a/pkg/printers/humanreadable_test.go b/pkg/printers/humanreadable_test.go deleted file mode 100644 index 2649ba34e23..00000000000 --- a/pkg/printers/humanreadable_test.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package printers - -import ( - "bytes" - "regexp" - "testing" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" -) - -func TestPrintUnstructuredObject(t *testing.T) { - tests := []struct { - expected string - options PrintOptions - }{ - { - expected: "NAME\\s+KIND\\s+DUMMY 1\\s+DUMMY 2\\s+ITEMS\nMyName\\s+Test\\.v1\\.\\s+present\\s+present\\s+1 item\\(s\\)", - }, - { - options: PrintOptions{ - WithNamespace: true, - }, - expected: "NAMESPACE\\s+NAME\\s+KIND\\s+DUMMY 1\\s+DUMMY 2\nMyNamespace\\s+MyName\\s+Test\\.v1\\.\\s+present\\s+present", - }, - { - options: PrintOptions{ - ShowLabels: true, - WithNamespace: true, - }, - expected: "NAMESPACE\\s+NAME\\s+KIND\\s+DUMMY 1\\s+LABELS\nMyNamespace\\s+MyName\\s+Test\\.v1\\.\\s+present\\s+", - }, - } - out := bytes.NewBuffer([]byte{}) - - obj := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Test", - "dummy1": "present", - "dummy2": "present", - "metadata": map[string]interface{}{ - "name": "MyName", - "namespace": "MyNamespace", - "creationTimestamp": "2017-04-01T00:00:00Z", - "resourceVersion": 123, - "uid": "00000000-0000-0000-0000-000000000001", - "dummy3": "present", - }, - "items": []interface{}{ - map[string]interface{}{ - "itemBool": true, - "itemInt": 42, - }, - }, - "url": "http://localhost", - "status": "ok", - }, - } - - for _, test := range tests { - printer := &HumanReadablePrinter{ - options: test.options, - } - printer.PrintObj(obj, out) - - matches, err := regexp.MatchString(test.expected, out.String()) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if !matches { - t.Errorf("wanted %s, got %s", test.expected, out) - } - } -} diff --git a/pkg/printers/internalversion/BUILD b/pkg/printers/internalversion/BUILD index 39332c8dfa8..5002f8cadf1 100644 --- a/pkg/printers/internalversion/BUILD +++ b/pkg/printers/internalversion/BUILD @@ -59,7 +59,6 @@ go_library( ], tags = ["automanaged"], deps = [ - 
"//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//federation/apis/federation:go_default_library", "//federation/client/clientset_generated/federation_internalclientset:go_default_library", "//pkg/api:go_default_library", @@ -76,7 +75,6 @@ go_library( "//pkg/apis/networking:go_default_library", "//pkg/apis/policy:go_default_library", "//pkg/apis/rbac:go_default_library", - "//pkg/apis/settings:go_default_library", "//pkg/apis/storage:go_default_library", "//pkg/apis/storage/util:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", diff --git a/pkg/printers/internalversion/describe.go b/pkg/printers/internalversion/describe.go index 6ed8a1a94f2..1fd5ed356a8 100644 --- a/pkg/printers/internalversion/describe.go +++ b/pkg/printers/internalversion/describe.go @@ -231,7 +231,7 @@ func printUnstructuredContent(w PrefixWriter, level int, content map[string]inte if slice.ContainsString(skip, skipExpr, nil) { continue } - w.Write(level, fmt.Sprintf("%s:\n", smartLabelFor(field))) + w.Write(level, "%s:\n", smartLabelFor(field)) printUnstructuredContent(w, level+1, typedValue, skipExpr, skip...) case []interface{}: @@ -239,13 +239,13 @@ func printUnstructuredContent(w PrefixWriter, level int, content map[string]inte if slice.ContainsString(skip, skipExpr, nil) { continue } - w.Write(level, fmt.Sprintf("%s:\n", smartLabelFor(field))) + w.Write(level, "%s:\n", smartLabelFor(field)) for _, child := range typedValue { switch typedChild := child.(type) { case map[string]interface{}: printUnstructuredContent(w, level+1, typedChild, skipExpr, skip...) 
default: - w.Write(level+1, fmt.Sprintf("%v\n", typedChild)) + w.Write(level+1, "%v\n", typedChild) } } @@ -254,7 +254,7 @@ func printUnstructuredContent(w PrefixWriter, level int, content map[string]inte if slice.ContainsString(skip, skipExpr, nil) { continue } - w.Write(level, fmt.Sprintf("%s:\t%v\n", smartLabelFor(field), typedValue)) + w.Write(level, "%s:\t%v\n", smartLabelFor(field), typedValue) } } } @@ -1421,7 +1421,7 @@ func (d *ReplicationControllerDescriber) Describe(namespace, name string, descri return "", err } - running, waiting, succeeded, failed, err := getPodStatusForController(pc, labels.SelectorFromSet(controller.Spec.Selector)) + running, waiting, succeeded, failed, err := getPodStatusForController(pc, labels.SelectorFromSet(controller.Spec.Selector), controller.UID) if err != nil { return "", err } @@ -1498,7 +1498,7 @@ func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings return "", err } - running, waiting, succeeded, failed, getPodErr := getPodStatusForController(pc, selector) + running, waiting, succeeded, failed, getPodErr := getPodStatusForController(pc, selector, rs.UID) var events *api.EventList if describerSettings.ShowEvents { @@ -1698,7 +1698,7 @@ func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings if err != nil { return "", err } - running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector) + running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector, daemon.UID) if err != nil { return "", err } @@ -2360,7 +2360,6 @@ func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", node.Name) - w.Write(LEVEL_0, "Role:\t%s\n", findNodeRole(node)) printLabelsMultiline(w, "Labels", node.Labels) printAnnotationsMultiline(w, "Annotations", node.Annotations) printNodeTaintsMultiline(w, "Taints", node.Spec.Taints) @@ 
-2453,7 +2452,7 @@ func (p *StatefulSetDescriber) Describe(namespace, name string, describerSetting return "", err } - running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector) + running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector, ps.UID) if err != nil { return "", err } @@ -2838,13 +2837,18 @@ func printReplicaSetsByLabels(matchingRSs []*versionedextension.ReplicaSet) stri return list } -func getPodStatusForController(c coreclient.PodInterface, selector labels.Selector) (running, waiting, succeeded, failed int, err error) { +func getPodStatusForController(c coreclient.PodInterface, selector labels.Selector, uid types.UID) (running, waiting, succeeded, failed int, err error) { options := metav1.ListOptions{LabelSelector: selector.String()} rcPods, err := c.List(options) if err != nil { return } for _, pod := range rcPods.Items { + controllerRef := controller.GetControllerOf(&pod) + // Skip pods that are orphans or owned by other controllers. 
+ if controllerRef == nil || controllerRef.UID != uid { + continue + } switch pod.Status.Phase { case api.PodRunning: running++ diff --git a/pkg/printers/internalversion/describe_test.go b/pkg/printers/internalversion/describe_test.go index 7e028755813..f1c67dde9f8 100644 --- a/pkg/printers/internalversion/describe_test.go +++ b/pkg/printers/internalversion/describe_test.go @@ -1492,3 +1492,98 @@ func TestDescribeResourceQuota(t *testing.T) { } } } + +// boolPtr returns a pointer to a bool +func boolPtr(b bool) *bool { + o := b + return &o +} + +func TestControllerRef(t *testing.T) { + f := fake.NewSimpleClientset( + &api.ReplicationController{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bar", + Namespace: "foo", + UID: "123456", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "ReplicationController", + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 1, + Selector: map[string]string{"abc": "xyz"}, + Template: &api.PodTemplateSpec{ + Spec: api.PodSpec{ + Containers: []api.Container{ + {Image: "mytest-image:latest"}, + }, + }, + }, + }, + }, + &api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "barpod", + Namespace: "foo", + Labels: map[string]string{"abc": "xyz"}, + OwnerReferences: []metav1.OwnerReference{{Name: "bar", UID: "123456", Controller: boolPtr(true)}}, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Image: "mytest-image:latest"}, + }, + }, + Status: api.PodStatus{ + Phase: api.PodRunning, + }, + }, + &api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "orphan", + Namespace: "foo", + Labels: map[string]string{"abc": "xyz"}, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Image: "mytest-image:latest"}, + }, + }, + Status: api.PodStatus{ + Phase: api.PodRunning, + }, + }, + &api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "buzpod", + Namespace: "foo", + Labels: map[string]string{"abc": "xyz"}, + OwnerReferences: 
[]metav1.OwnerReference{{Name: "buz", UID: "654321", Controller: boolPtr(true)}}, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Image: "mytest-image:latest"}, + }, + }, + Status: api.PodStatus{ + Phase: api.PodRunning, + }, + }) + d := ReplicationControllerDescriber{f} + out, err := d.Describe("foo", "bar", printers.DescriberSettings{ShowEvents: false}) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !strings.Contains(out, "1 Running") { + t.Errorf("unexpected out: %s", out) + } +} diff --git a/pkg/printers/internalversion/printers.go b/pkg/printers/internalversion/printers.go index 9fc37422a21..443790e2756 100644 --- a/pkg/printers/internalversion/printers.go +++ b/pkg/printers/internalversion/printers.go @@ -30,12 +30,12 @@ import ( batchv2alpha1 "k8s.io/api/batch/v2alpha1" apiv1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1alpha1 "k8s.io/apimachinery/pkg/apis/meta/v1alpha1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/federation/apis/federation" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/events" @@ -48,7 +48,6 @@ import ( "k8s.io/kubernetes/pkg/apis/networking" "k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/apis/settings" "k8s.io/kubernetes/pkg/apis/storage" storageutil "k8s.io/kubernetes/pkg/apis/storage/util" "k8s.io/kubernetes/pkg/controller" @@ -69,8 +68,6 @@ var ( nodeColumns = []string{"NAME", "STATUS", "AGE", "VERSION"} nodeWideColumns = []string{"EXTERNAL-IP", "OS-IMAGE", "KERNEL-VERSION", "CONTAINER-RUNTIME"} eventColumns = []string{"LASTSEEN", "FIRSTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "TYPE", "REASON", "SOURCE", "MESSAGE"} - limitRangeColumns = []string{"NAME", 
"AGE"} - resourceQuotaColumns = []string{"NAME", "AGE"} namespaceColumns = []string{"NAME", "STATUS", "AGE"} secretColumns = []string{"NAME", "TYPE", "DATA", "AGE"} serviceAccountColumns = []string{"NAME", "SECRETS", "AGE"} @@ -78,17 +75,13 @@ var ( persistentVolumeClaimColumns = []string{"NAME", "STATUS", "VOLUME", "CAPACITY", "ACCESSMODES", "STORAGECLASS", "AGE"} componentStatusColumns = []string{"NAME", "STATUS", "MESSAGE", "ERROR"} thirdPartyResourceColumns = []string{"NAME", "DESCRIPTION", "VERSION(S)"} - roleColumns = []string{"NAME", "AGE"} roleBindingColumns = []string{"NAME", "AGE"} roleBindingWideColumns = []string{"ROLE", "USERS", "GROUPS", "SERVICEACCOUNTS"} - clusterRoleColumns = []string{"NAME", "AGE"} clusterRoleBindingColumns = []string{"NAME", "AGE"} clusterRoleBindingWideColumns = []string{"ROLE", "USERS", "GROUPS", "SERVICEACCOUNTS"} storageClassColumns = []string{"NAME", "PROVISIONER"} statusColumns = []string{"STATUS", "REASON", "MESSAGE"} - // TODO: consider having 'KIND' for third party resource data - thirdPartyResourceDataColumns = []string{"NAME", "LABELS", "DATA"} horizontalPodAutoscalerColumns = []string{"NAME", "REFERENCE", "TARGETS", "MINPODS", "MAXPODS", "REPLICAS", "AGE"} deploymentColumns = []string{"NAME", "DESIRED", "CURRENT", "UP-TO-DATE", "AVAILABLE", "AGE"} deploymentWideColumns = []string{"CONTAINER(S)", "IMAGE(S)", "SELECTOR"} @@ -215,10 +208,6 @@ func AddHandlers(h printers.PrintHandler) { h.Handler(nodeColumns, nodeWideColumns, printNodeList) h.Handler(eventColumns, nil, printEvent) h.Handler(eventColumns, nil, printEventList) - h.Handler(limitRangeColumns, nil, printLimitRange) - h.Handler(limitRangeColumns, nil, printLimitRangeList) - h.Handler(resourceQuotaColumns, nil, printResourceQuota) - h.Handler(resourceQuotaColumns, nil, printResourceQuotaList) h.Handler(namespaceColumns, nil, printNamespace) h.Handler(namespaceColumns, nil, printNamespaceList) h.Handler(secretColumns, nil, printSecret) @@ -241,31 +230,65 @@ func 
AddHandlers(h printers.PrintHandler) { h.Handler(configMapColumns, nil, printConfigMapList) h.Handler(podSecurityPolicyColumns, nil, printPodSecurityPolicy) h.Handler(podSecurityPolicyColumns, nil, printPodSecurityPolicyList) - h.Handler(thirdPartyResourceDataColumns, nil, printThirdPartyResourceData) - h.Handler(thirdPartyResourceDataColumns, nil, printThirdPartyResourceDataList) h.Handler(clusterColumns, nil, printCluster) h.Handler(clusterColumns, nil, printClusterList) h.Handler(networkPolicyColumns, nil, printExtensionsNetworkPolicy) h.Handler(networkPolicyColumns, nil, printExtensionsNetworkPolicyList) h.Handler(networkPolicyColumns, nil, printNetworkPolicy) h.Handler(networkPolicyColumns, nil, printNetworkPolicyList) - h.Handler(roleColumns, nil, printRole) - h.Handler(roleColumns, nil, printRoleList) h.Handler(roleBindingColumns, roleBindingWideColumns, printRoleBinding) h.Handler(roleBindingColumns, roleBindingWideColumns, printRoleBindingList) - h.Handler(clusterRoleColumns, nil, printClusterRole) - h.Handler(clusterRoleColumns, nil, printClusterRoleList) h.Handler(clusterRoleBindingColumns, clusterRoleBindingWideColumns, printClusterRoleBinding) h.Handler(clusterRoleBindingColumns, clusterRoleBindingWideColumns, printClusterRoleBindingList) h.Handler(certificateSigningRequestColumns, nil, printCertificateSigningRequest) h.Handler(certificateSigningRequestColumns, nil, printCertificateSigningRequestList) h.Handler(storageClassColumns, nil, printStorageClass) h.Handler(storageClassColumns, nil, printStorageClassList) - h.Handler(podPresetColumns, nil, printPodPreset) - h.Handler(podPresetColumns, nil, printPodPresetList) h.Handler(statusColumns, nil, printStatus) h.Handler(controllerRevisionColumns, nil, printControllerRevision) h.Handler(controllerRevisionColumns, nil, printControllerRevisionList) + + AddDefaultHandlers(h) +} + +// AddDefaultHandlers adds handlers that can work with most Kubernetes objects. 
+func AddDefaultHandlers(h printers.PrintHandler) { + // types without defined columns + objectMetaColumnDefinitions := []metav1alpha1.TableColumnDefinition{ + {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, + {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, + } + h.DefaultTableHandler(objectMetaColumnDefinitions, printObjectMeta) +} + +func printObjectMeta(obj runtime.Object, options printers.PrintOptions) ([]metav1alpha1.TableRow, error) { + if meta.IsListType(obj) { + rows := make([]metav1alpha1.TableRow, 0, 16) + err := meta.EachListItem(obj, func(obj runtime.Object) error { + nestedRows, err := printObjectMeta(obj, options) + if err != nil { + return err + } + rows = append(rows, nestedRows...) + return nil + }) + if err != nil { + return nil, err + } + return rows, nil + } + + rows := make([]metav1alpha1.TableRow, 0, 1) + m, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + row := metav1alpha1.TableRow{ + Object: runtime.RawExtension{Object: obj}, + } + row.Cells = append(row.Cells, m.GetName(), translateTimestamp(m.GetCreationTimestamp())) + rows = append(rows, row) + return rows, nil } // Pass ports=nil for all ports. @@ -667,8 +690,12 @@ func getServiceExternalIP(svc *api.Service, wide bool) string { case api.ServiceTypeLoadBalancer: lbIps := loadBalancerStatusStringer(svc.Status.LoadBalancer, wide) if len(svc.Spec.ExternalIPs) > 0 { - result := append(strings.Split(lbIps, ","), svc.Spec.ExternalIPs...) - return strings.Join(result, ",") + results := []string{} + if len(lbIps) > 0 { + results = append(results, strings.Split(lbIps, ",")...) + } + results = append(results, svc.Spec.ExternalIPs...) 
+ return strings.Join(results, ",") } if len(lbIps) > 0 { return lbIps @@ -697,7 +724,14 @@ func printService(svc *api.Service, w io.Writer, options printers.PrintOptions) namespace := svc.Namespace svcType := svc.Spec.Type internalIP := svc.Spec.ClusterIP + if len(internalIP) == 0 { + internalIP = "" + } externalIP := getServiceExternalIP(svc, options.Wide) + svcPorts := makePortString(svc.Spec.Ports) + if len(svcPorts) == 0 { + svcPorts = "" + } if options.WithNamespace { if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { @@ -709,7 +743,7 @@ func printService(svc *api.Service, w io.Writer, options printers.PrintOptions) string(svcType), internalIP, externalIP, - makePortString(svc.Spec.Ports), + svcPorts, translateTimestamp(svc.CreationTimestamp), ); err != nil { return err @@ -1033,10 +1067,6 @@ func printNode(node *api.Node, w io.Writer, options printers.PrintOptions) error if node.Spec.Unschedulable { status = append(status, "SchedulingDisabled") } - role := findNodeRole(node) - if role != "" { - status = append(status, role) - } if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s", name, strings.Join(status, ","), translateTimestamp(node.CreationTimestamp), node.Status.NodeInfo.KubeletVersion); err != nil { return err @@ -1076,19 +1106,6 @@ func getNodeExternalIP(node *api.Node) string { return "" } -// findNodeRole returns the role of a given node, or "" if none found. 
-// The role is determined by looking in order for: -// * a kubernetes.io/role label -// * a kubeadm.alpha.kubernetes.io/role label -// If no role is found, ("", nil) is returned -func findNodeRole(node *api.Node) string { - if role := node.Labels[kubeadm.NodeLabelKubeadmAlphaRole]; role != "" { - return role - } - // No role found - return "" -} - func printNodeList(list *api.NodeList, w io.Writer, options printers.PrintOptions) error { for _, node := range list.Items { if err := printNode(&node, w, options); err != nil { @@ -1238,72 +1255,6 @@ func printEventList(list *api.EventList, w io.Writer, options printers.PrintOpti return nil } -func printLimitRange(limitRange *api.LimitRange, w io.Writer, options printers.PrintOptions) error { - return printObjectMeta(limitRange.ObjectMeta, w, options, true) -} - -// Prints the LimitRangeList in a human-friendly format. -func printLimitRangeList(list *api.LimitRangeList, w io.Writer, options printers.PrintOptions) error { - for i := range list.Items { - if err := printLimitRange(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -// printObjectMeta prints the object metadata of a given resource. 
-func printObjectMeta(meta metav1.ObjectMeta, w io.Writer, options printers.PrintOptions, namespaced bool) error { - name := printers.FormatResourceName(options.Kind, meta.Name, options.WithKind) - - if namespaced && options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", meta.Namespace); err != nil { - return err - } - } - - if _, err := fmt.Fprintf( - w, "%s\t%s", - name, - translateTimestamp(meta.CreationTimestamp), - ); err != nil { - return err - } - if _, err := fmt.Fprint(w, printers.AppendLabels(meta.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, printers.AppendAllLabels(options.ShowLabels, meta.Labels)) - return err -} - -func printResourceQuota(resourceQuota *api.ResourceQuota, w io.Writer, options printers.PrintOptions) error { - return printObjectMeta(resourceQuota.ObjectMeta, w, options, true) -} - -// Prints the ResourceQuotaList in a human-friendly format. -func printResourceQuotaList(list *api.ResourceQuotaList, w io.Writer, options printers.PrintOptions) error { - for i := range list.Items { - if err := printResourceQuota(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printRole(role *rbac.Role, w io.Writer, options printers.PrintOptions) error { - return printObjectMeta(role.ObjectMeta, w, options, true) -} - -// Prints the Role in a human-friendly format. 
-func printRoleList(list *rbac.RoleList, w io.Writer, options printers.PrintOptions) error { - for i := range list.Items { - if err := printRole(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - func printRoleBinding(roleBinding *rbac.RoleBinding, w io.Writer, options printers.PrintOptions) error { meta := roleBinding.ObjectMeta name := printers.FormatResourceName(options.Kind, meta.Name, options.WithKind) @@ -1352,23 +1303,6 @@ func printRoleBindingList(list *rbac.RoleBindingList, w io.Writer, options print return nil } -func printClusterRole(clusterRole *rbac.ClusterRole, w io.Writer, options printers.PrintOptions) error { - if options.WithNamespace { - return fmt.Errorf("clusterRole is not namespaced") - } - return printObjectMeta(clusterRole.ObjectMeta, w, options, false) -} - -// Prints the ClusterRole in a human-friendly format. -func printClusterRoleList(list *rbac.ClusterRoleList, w io.Writer, options printers.PrintOptions) error { - for i := range list.Items { - if err := printClusterRole(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - func printClusterRoleBinding(clusterRoleBinding *rbac.ClusterRoleBinding, w io.Writer, options printers.PrintOptions) error { meta := clusterRoleBinding.ObjectMeta name := printers.FormatResourceName(options.Kind, meta.Name, options.WithKind) @@ -1550,30 +1484,6 @@ func truncate(str string, maxLen int) string { return str } -func printThirdPartyResourceData(rsrc *extensions.ThirdPartyResourceData, w io.Writer, options printers.PrintOptions) error { - name := printers.FormatResourceName(options.Kind, rsrc.Name, options.WithKind) - - l := labels.FormatLabels(rsrc.Labels) - truncateCols := 50 - if options.Wide { - truncateCols = 100 - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", name, l, truncate(string(rsrc.Data), truncateCols)); err != nil { - return err - } - return nil -} - -func printThirdPartyResourceDataList(list *extensions.ThirdPartyResourceDataList, w 
io.Writer, options printers.PrintOptions) error { - for _, item := range list.Items { - if err := printThirdPartyResourceData(&item, w, options); err != nil { - return err - } - } - - return nil -} - func printDeployment(deployment *extensions.Deployment, w io.Writer, options printers.PrintOptions) error { name := printers.FormatResourceName(options.Kind, deployment.Name, options.WithKind) @@ -1871,19 +1781,6 @@ func printStorageClassList(scList *storage.StorageClassList, w io.Writer, option return nil } -func printPodPreset(podPreset *settings.PodPreset, w io.Writer, options printers.PrintOptions) error { - return printObjectMeta(podPreset.ObjectMeta, w, options, false) -} - -func printPodPresetList(list *settings.PodPresetList, w io.Writer, options printers.PrintOptions) error { - for i := range list.Items { - if err := printPodPreset(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - func printStatus(status *metav1.Status, w io.Writer, options printers.PrintOptions) error { if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", status.Status, status.Reason, status.Message); err != nil { return err diff --git a/pkg/printers/internalversion/printers_test.go b/pkg/printers/internalversion/printers_test.go index ffc0649fa1e..a2083a8e264 100644 --- a/pkg/printers/internalversion/printers_test.go +++ b/pkg/printers/internalversion/printers_test.go @@ -22,6 +22,7 @@ import ( "fmt" "io" "reflect" + "regexp" "strconv" "strings" "testing" @@ -103,6 +104,112 @@ func TestPrintDefault(t *testing.T) { } } +func TestPrintUnstructuredObject(t *testing.T) { + obj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Test", + "dummy1": "present", + "dummy2": "present", + "metadata": map[string]interface{}{ + "name": "MyName", + "namespace": "MyNamespace", + "creationTimestamp": "2017-04-01T00:00:00Z", + "resourceVersion": 123, + "uid": "00000000-0000-0000-0000-000000000001", + "dummy3": "present", + "labels": 
map[string]interface{}{"test": "other"}, + }, + /*"items": []interface{}{ + map[string]interface{}{ + "itemBool": true, + "itemInt": 42, + }, + },*/ + "url": "http://localhost", + "status": "ok", + }, + } + + tests := []struct { + expected string + options printers.PrintOptions + object runtime.Object + }{ + { + expected: "NAME\\s+AGE\nMyName\\s+\\d+", + object: obj, + }, + { + options: printers.PrintOptions{ + WithNamespace: true, + }, + expected: "NAMESPACE\\s+NAME\\s+AGE\nMyNamespace\\s+MyName\\s+\\d+", + object: obj, + }, + { + options: printers.PrintOptions{ + ShowLabels: true, + WithNamespace: true, + }, + expected: "NAMESPACE\\s+NAME\\s+AGE\\s+LABELS\nMyNamespace\\s+MyName\\s+\\d+\\w+\\s+test\\=other", + object: obj, + }, + { + expected: "NAME\\s+AGE\nMyName\\s+\\d+\\w+\nMyName2\\s+\\d+", + object: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Test", + "dummy1": "present", + "dummy2": "present", + "items": []interface{}{ + map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "MyName", + "namespace": "MyNamespace", + "creationTimestamp": "2017-04-01T00:00:00Z", + "resourceVersion": 123, + "uid": "00000000-0000-0000-0000-000000000001", + "dummy3": "present", + "labels": map[string]interface{}{"test": "other"}, + }, + }, + map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "MyName2", + "namespace": "MyNamespace", + "creationTimestamp": "2017-04-01T00:00:00Z", + "resourceVersion": 123, + "uid": "00000000-0000-0000-0000-000000000001", + "dummy3": "present", + "labels": "badlabel", + }, + }, + }, + "url": "http://localhost", + "status": "ok", + }, + }, + }, + } + out := bytes.NewBuffer([]byte{}) + + for _, test := range tests { + out.Reset() + printer := printers.NewHumanReadablePrinter(nil, nil, test.options).With(AddDefaultHandlers) + printer.PrintObj(test.object, out) + + matches, err := regexp.MatchString(test.expected, out.String()) + if err != nil { + 
t.Fatalf("unexpected error: %v", err) + } + if !matches { + t.Errorf("wanted:\n%s\ngot:\n%s", test.expected, out) + } + } +} + type TestPrintType struct { Data string } @@ -704,16 +811,6 @@ func TestPrintNodeStatus(t *testing.T) { }, status: "Unknown,SchedulingDisabled", }, - { - node: api.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo12", - Labels: map[string]string{"kubeadm.alpha.kubernetes.io/role": "node"}, - }, - Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionTrue}}}, - }, - status: "Ready,node", - }, } for _, test := range table { @@ -2332,6 +2429,8 @@ func TestPrintPodShowLabels(t *testing.T) { } func TestPrintService(t *testing.T) { + single_ExternalIP := []string{"80.11.12.10"} + mul_ExternalIP := []string{"80.11.12.10", "80.11.12.11"} tests := []struct { service api.Service expect string @@ -2343,8 +2442,10 @@ func TestPrintService(t *testing.T) { Spec: api.ServiceSpec{ Type: api.ServiceTypeClusterIP, Ports: []api.ServicePort{ - {Protocol: "tcp", - Port: 2233}, + { + Protocol: "tcp", + Port: 2233, + }, }, ClusterIP: "10.9.8.7", }, @@ -2352,13 +2453,14 @@ func TestPrintService(t *testing.T) { "test1\tClusterIP\t10.9.8.7\t\t2233/tcp\t\n", }, { - // Test name, cluster ip, port:nodePort with protocol + // Test NodePort service api.Service{ ObjectMeta: metav1.ObjectMeta{Name: "test2"}, Spec: api.ServiceSpec{ Type: api.ServiceTypeNodePort, Ports: []api.ServicePort{ - {Protocol: "tcp", + { + Protocol: "tcp", Port: 8888, NodePort: 9999, }, @@ -2368,6 +2470,112 @@ func TestPrintService(t *testing.T) { }, "test2\tNodePort\t10.9.8.7\t\t8888:9999/tcp\t\n", }, + { + // Test LoadBalancer service + api.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "test3"}, + Spec: api.ServiceSpec{ + Type: api.ServiceTypeLoadBalancer, + Ports: []api.ServicePort{ + { + Protocol: "tcp", + Port: 8888, + }, + }, + ClusterIP: "10.9.8.7", + }, + }, + "test3\tLoadBalancer\t10.9.8.7\t\t8888/tcp\t\n", + }, + { + // Test LoadBalancer 
service with single ExternalIP and no LoadBalancerStatus + api.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "test4"}, + Spec: api.ServiceSpec{ + Type: api.ServiceTypeLoadBalancer, + Ports: []api.ServicePort{ + { + Protocol: "tcp", + Port: 8888, + }, + }, + ClusterIP: "10.9.8.7", + ExternalIPs: single_ExternalIP, + }, + }, + "test4\tLoadBalancer\t10.9.8.7\t80.11.12.10\t8888/tcp\t\n", + }, + { + // Test LoadBalancer service with single ExternalIP + api.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "test5"}, + Spec: api.ServiceSpec{ + Type: api.ServiceTypeLoadBalancer, + Ports: []api.ServicePort{ + { + Protocol: "tcp", + Port: 8888, + }, + }, + ClusterIP: "10.9.8.7", + ExternalIPs: single_ExternalIP, + }, + Status: api.ServiceStatus{ + LoadBalancer: api.LoadBalancerStatus{ + Ingress: []api.LoadBalancerIngress{ + { + IP: "3.4.5.6", + Hostname: "test.cluster.com", + }, + }, + }, + }, + }, + "test5\tLoadBalancer\t10.9.8.7\t3.4.5.6,80.11.12.10\t8888/tcp\t\n", + }, + { + // Test LoadBalancer service with mul ExternalIPs + api.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "test6"}, + Spec: api.ServiceSpec{ + Type: api.ServiceTypeLoadBalancer, + Ports: []api.ServicePort{ + { + Protocol: "tcp", + Port: 8888, + }, + }, + ClusterIP: "10.9.8.7", + ExternalIPs: mul_ExternalIP, + }, + Status: api.ServiceStatus{ + LoadBalancer: api.LoadBalancerStatus{ + Ingress: []api.LoadBalancerIngress{ + { + IP: "2.3.4.5", + Hostname: "test.cluster.local", + }, + { + IP: "3.4.5.6", + Hostname: "test.cluster.com", + }, + }, + }, + }, + }, + "test6\tLoadBalancer\t10.9.8.7\t2.3.4.5,3.4.5.6,80.11.12.10,80.11.12.11\t8888/tcp\t\n", + }, + { + // Test ExternalName service + api.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "test7"}, + Spec: api.ServiceSpec{ + Type: api.ServiceTypeExternalName, + ExternalName: "my.database.example.com", + }, + }, + "test7\tExternalName\t\tmy.database.example.com\t\t\n", + }, } buf := bytes.NewBuffer([]byte{}) diff --git a/pkg/printers/printers.go 
b/pkg/printers/printers.go index 21775585293..c956170355b 100644 --- a/pkg/printers/printers.go +++ b/pkg/printers/printers.go @@ -91,7 +91,7 @@ func GetStandardPrinter(outputOpts *OutputOptions, noHeaders bool, mapper meta.R case "jsonpath-file": if len(formatArgument) == 0 { - return nil, fmt.Errorf("jsonpath file format specified but no template file file given") + return nil, fmt.Errorf("jsonpath file format specified but no template file given") } data, err := ioutil.ReadFile(formatArgument) if err != nil { diff --git a/pkg/probe/http/BUILD b/pkg/probe/http/BUILD index c7e22d2c769..8109f37279f 100644 --- a/pkg/probe/http/BUILD +++ b/pkg/probe/http/BUILD @@ -14,6 +14,7 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/probe:go_default_library", + "//pkg/version:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", ], diff --git a/pkg/probe/http/http.go b/pkg/probe/http/http.go index d0744e792ea..b9821be05fc 100644 --- a/pkg/probe/http/http.go +++ b/pkg/probe/http/http.go @@ -26,6 +26,7 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/kubernetes/pkg/probe" + "k8s.io/kubernetes/pkg/version" "github.com/golang/glog" ) @@ -68,6 +69,14 @@ func DoHTTPProbe(url *url.URL, headers http.Header, client HTTPGetInterface) (pr // Convert errors into failures to catch timeouts. 
return probe.Failure, err.Error(), nil } + if _, ok := headers["User-Agent"]; !ok { + if headers == nil { + headers = http.Header{} + } + // explicitly set User-Agent so it's not set to default Go value + v := version.Get() + headers.Set("User-Agent", fmt.Sprintf("kube-probe/%s.%s", v.Major, v.Minor)) + } req.Header = headers if headers.Get("Host") != "" { req.Host = headers.Get("Host") diff --git a/pkg/probe/http/http_test.go b/pkg/probe/http/http_test.go index c1cf9a2c2c7..6e9efb86d29 100644 --- a/pkg/probe/http/http_test.go +++ b/pkg/probe/http/http_test.go @@ -40,12 +40,25 @@ func TestHTTPProbeChecker(t *testing.T) { } } + // Echo handler that returns the contents of request headers in the body + headerEchoHandler := func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + output := "" + for k, arr := range r.Header { + for _, v := range arr { + output += fmt.Sprintf("%s: %s\n", k, v) + } + } + w.Write([]byte(output)) + } + prober := New() testCases := []struct { handler func(w http.ResponseWriter, r *http.Request) reqHeaders http.Header health probe.Result accBody string + notBody string }{ // The probe will be filled in below. This is primarily testing that an HTTP GET happens. 
{ @@ -54,23 +67,35 @@ func TestHTTPProbeChecker(t *testing.T) { accBody: "ok body", }, { - // Echo handler that returns the contents of request headers in the body - handler: func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(200) - output := "" - for k, arr := range r.Header { - for _, v := range arr { - output += fmt.Sprintf("%s: %s\n", k, v) - } - } - w.Write([]byte(output)) - }, + handler: headerEchoHandler, reqHeaders: http.Header{ "X-Muffins-Or-Cupcakes": {"muffins"}, }, health: probe.Success, accBody: "X-Muffins-Or-Cupcakes: muffins", }, + { + handler: headerEchoHandler, + reqHeaders: http.Header{ + "User-Agent": {"foo/1.0"}, + }, + health: probe.Success, + accBody: "User-Agent: foo/1.0", + }, + { + handler: headerEchoHandler, + reqHeaders: http.Header{ + "User-Agent": {""}, + }, + health: probe.Success, + notBody: "User-Agent", + }, + { + handler: headerEchoHandler, + reqHeaders: http.Header{}, + health: probe.Success, + accBody: "User-Agent: kube-probe/", + }, { // Echo handler that returns the contents of Host in the body handler: func(w http.ResponseWriter, r *http.Request) { @@ -130,6 +155,9 @@ func TestHTTPProbeChecker(t *testing.T) { if !strings.Contains(output, test.accBody) { t.Errorf("Expected response body to contain %v, got %v", test.accBody, output) } + if test.notBody != "" && strings.Contains(output, test.notBody) { + t.Errorf("Expected response not to contain %v, got %v", test.notBody, output) + } } }() } diff --git a/pkg/proxy/OWNERS b/pkg/proxy/OWNERS index 646b3c519b1..f1f0145ed00 100644 --- a/pkg/proxy/OWNERS +++ b/pkg/proxy/OWNERS @@ -1,6 +1,5 @@ approvers: - thockin -- bprashanth - matchstick reviewers: - thockin @@ -8,7 +7,6 @@ reviewers: - smarterclayton - brendandburns - vishh -- bprashanth - justinsb - freehan - dcbw diff --git a/pkg/proxy/config/OWNERS b/pkg/proxy/config/OWNERS index 22bdb502d2e..d9bd05962a3 100755 --- a/pkg/proxy/config/OWNERS +++ b/pkg/proxy/config/OWNERS @@ -3,5 +3,4 @@ reviewers: - lavalamp - 
smarterclayton - brendandburns -- bprashanth - freehan diff --git a/pkg/proxy/iptables/OWNERS b/pkg/proxy/iptables/OWNERS index 1430b9e2e81..d0dffc12400 100755 --- a/pkg/proxy/iptables/OWNERS +++ b/pkg/proxy/iptables/OWNERS @@ -1,7 +1,6 @@ reviewers: - thockin - smarterclayton -- bprashanth - justinsb - freehan - dcbw diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index 8e6c444e822..0a8123be094 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -250,6 +250,17 @@ type serviceChangeMap struct { items map[types.NamespacedName]*serviceChange } +type updateEndpointMapResult struct { + hcEndpoints map[types.NamespacedName]int + staleEndpoints map[endpointServicePair]bool + staleServiceNames map[proxy.ServicePortName]bool +} + +type updateServiceMapResult struct { + hcServices map[types.NamespacedName]uint16 + staleServices sets.String +} + type proxyServiceMap map[proxy.ServicePortName]*serviceInfo type proxyEndpointsMap map[proxy.ServicePortName][]*endpointsInfo @@ -694,29 +705,29 @@ func shouldSkipService(svcName types.NamespacedName, service *api.Service) bool // map is cleared after applying them. func updateServiceMap( serviceMap proxyServiceMap, - changes *serviceChangeMap) (hcServices map[types.NamespacedName]uint16, staleServices sets.String) { - staleServices = sets.NewString() + changes *serviceChangeMap) (result updateServiceMapResult) { + result.staleServices = sets.NewString() func() { changes.lock.Lock() defer changes.lock.Unlock() for _, change := range changes.items { existingPorts := serviceMap.merge(change.current) - serviceMap.unmerge(change.previous, existingPorts, staleServices) + serviceMap.unmerge(change.previous, existingPorts, result.staleServices) } changes.items = make(map[types.NamespacedName]*serviceChange) }() // TODO: If this will appear to be computationally expensive, consider // computing this incrementally similarly to serviceMap. 
- hcServices = make(map[types.NamespacedName]uint16) + result.hcServices = make(map[types.NamespacedName]uint16) for svcPortName, info := range serviceMap { if info.healthCheckNodePort != 0 { - hcServices[svcPortName.NamespacedName] = uint16(info.healthCheckNodePort) + result.hcServices[svcPortName.NamespacedName] = uint16(info.healthCheckNodePort) } } - return hcServices, staleServices + return result } func (proxier *Proxier) OnEndpointsAdd(endpoints *api.Endpoints) { @@ -755,8 +766,9 @@ func (proxier *Proxier) OnEndpointsSynced() { func updateEndpointsMap( endpointsMap proxyEndpointsMap, changes *endpointsChangeMap, - hostname string) (hcEndpoints map[types.NamespacedName]int, staleSet map[endpointServicePair]bool) { - staleSet = make(map[endpointServicePair]bool) + hostname string) (result updateEndpointMapResult) { + result.staleEndpoints = make(map[endpointServicePair]bool) + result.staleServiceNames = make(map[proxy.ServicePortName]bool) func() { changes.lock.Lock() @@ -764,7 +776,7 @@ func updateEndpointsMap( for _, change := range changes.items { endpointsMap.unmerge(change.previous) endpointsMap.merge(change.current) - detectStaleConnections(change.previous, change.current, staleSet) + detectStaleConnections(change.previous, change.current, result.staleEndpoints, result.staleServiceNames) } changes.items = make(map[types.NamespacedName]*endpointsChange) }() @@ -775,18 +787,17 @@ func updateEndpointsMap( // TODO: If this will appear to be computationally expensive, consider // computing this incrementally similarly to endpointsMap. - hcEndpoints = make(map[types.NamespacedName]int) + result.hcEndpoints = make(map[types.NamespacedName]int) localIPs := getLocalIPs(endpointsMap) for nsn, ips := range localIPs { - hcEndpoints[nsn] = len(ips) + result.hcEndpoints[nsn] = len(ips) } - return hcEndpoints, staleSet + return result } -// are modified by this function with detected stale -// connections. 
-func detectStaleConnections(oldEndpointsMap, newEndpointsMap proxyEndpointsMap, staleEndpoints map[endpointServicePair]bool) { +// and are modified by this function with detected stale connections. +func detectStaleConnections(oldEndpointsMap, newEndpointsMap proxyEndpointsMap, staleEndpoints map[endpointServicePair]bool, staleServiceNames map[proxy.ServicePortName]bool) { for svcPortName, epList := range oldEndpointsMap { for _, ep := range epList { stale := true @@ -802,6 +813,13 @@ func detectStaleConnections(oldEndpointsMap, newEndpointsMap proxyEndpointsMap, } } } + + for svcPortName, epList := range newEndpointsMap { + // For udp service, if its backend changes from 0 to non-0. There may exist a conntrack entry that could blackhole traffic to the service. + if len(epList) > 0 && len(oldEndpointsMap[svcPortName]) == 0 { + staleServiceNames[svcPortName] = true + } + } } func getLocalIPs(endpointsMap proxyEndpointsMap) map[types.NamespacedName]sets.String { @@ -983,11 +1001,20 @@ func (proxier *Proxier) syncProxyRules() { // We assume that if this was called, we really want to sync them, // even if nothing changed in the meantime. In other words, callers are // responsible for detecting no-op changes and not calling this function. 
- hcServices, staleServices := updateServiceMap( + serviceUpdateResult := updateServiceMap( proxier.serviceMap, &proxier.serviceChanges) - hcEndpoints, staleEndpoints := updateEndpointsMap( + endpointUpdateResult := updateEndpointsMap( proxier.endpointsMap, &proxier.endpointsChanges, proxier.hostname) + staleServices := serviceUpdateResult.staleServices + // merge stale services gathered from updateEndpointsMap + for svcPortName := range endpointUpdateResult.staleServiceNames { + if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.protocol == api.ProtocolUDP { + glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.clusterIP.String()) + staleServices.Insert(svcInfo.clusterIP.String()) + } + } + glog.V(3).Infof("Syncing iptables rules") // Create and link the kube services chain. @@ -1594,17 +1621,17 @@ func (proxier *Proxier) syncProxyRules() { // Update healthchecks. The endpoints list might include services that are // not "OnlyLocal", but the services list will not, and the healthChecker // will just drop those endpoints. - if err := proxier.healthChecker.SyncServices(hcServices); err != nil { + if err := proxier.healthChecker.SyncServices(serviceUpdateResult.hcServices); err != nil { glog.Errorf("Error syncing healtcheck services: %v", err) } - if err := proxier.healthChecker.SyncEndpoints(hcEndpoints); err != nil { + if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.hcEndpoints); err != nil { glog.Errorf("Error syncing healthcheck endoints: %v", err) } // Finish housekeeping. // TODO: these and clearUDPConntrackForPort() could be made more consistent. utilproxy.DeleteServiceConnections(proxier.exec, staleServices.List()) - proxier.deleteEndpointConnections(staleEndpoints) + proxier.deleteEndpointConnections(endpointUpdateResult.staleEndpoints) } // Clear UDP conntrack for port or all conntrack entries when port equal zero. 
diff --git a/pkg/proxy/iptables/proxier_test.go b/pkg/proxy/iptables/proxier_test.go index 989c244776e..894a98ccc4d 100644 --- a/pkg/proxy/iptables/proxier_test.go +++ b/pkg/proxy/iptables/proxier_test.go @@ -1088,24 +1088,24 @@ func TestBuildServiceMapAddRemove(t *testing.T) { for i := range services { fp.OnServiceAdd(services[i]) } - hcPorts, staleUDPServices := updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result := updateServiceMap(fp.serviceMap, &fp.serviceChanges) if len(fp.serviceMap) != 8 { t.Errorf("expected service map length 8, got %v", fp.serviceMap) } // The only-local-loadbalancer ones get added - if len(hcPorts) != 1 { - t.Errorf("expected 1 healthcheck port, got %v", hcPorts) + if len(result.hcServices) != 1 { + t.Errorf("expected 1 healthcheck port, got %v", result.hcServices) } else { nsn := makeNSN("somewhere", "only-local-load-balancer") - if port, found := hcPorts[nsn]; !found || port != 345 { - t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, hcPorts) + if port, found := result.hcServices[nsn]; !found || port != 345 { + t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, result.hcServices) } } - if len(staleUDPServices) != 0 { + if len(result.staleServices) != 0 { // Services only added, so nothing stale yet - t.Errorf("expected stale UDP services length 0, got %d", len(staleUDPServices)) + t.Errorf("expected stale UDP services length 0, got %d", len(result.staleServices)) } // Remove some stuff @@ -1121,24 +1121,24 @@ func TestBuildServiceMapAddRemove(t *testing.T) { fp.OnServiceDelete(services[2]) fp.OnServiceDelete(services[3]) - hcPorts, staleUDPServices = updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result = updateServiceMap(fp.serviceMap, &fp.serviceChanges) if len(fp.serviceMap) != 1 { t.Errorf("expected service map length 1, got %v", fp.serviceMap) } - if len(hcPorts) != 0 { - t.Errorf("expected 0 healthcheck ports, got %v", hcPorts) + if len(result.hcServices) != 0 { + t.Errorf("expected 0 
healthcheck ports, got %v", result.hcServices) } // All services but one were deleted. While you'd expect only the ClusterIPs // from the three deleted services here, we still have the ClusterIP for // the not-deleted service, because one of it's ServicePorts was deleted. expectedStaleUDPServices := []string{"172.16.55.10", "172.16.55.4", "172.16.55.11", "172.16.55.12"} - if len(staleUDPServices) != len(expectedStaleUDPServices) { - t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), staleUDPServices.List()) + if len(result.staleServices) != len(expectedStaleUDPServices) { + t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.staleServices.List()) } for _, ip := range expectedStaleUDPServices { - if !staleUDPServices.Has(ip) { + if !result.staleServices.Has(ip) { t.Errorf("expected stale UDP service service %s", ip) } } @@ -1154,21 +1154,25 @@ func TestBuildServiceMapServiceHeadless(t *testing.T) { svc.Spec.ClusterIP = api.ClusterIPNone svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0) }), + makeTestService("somewhere-else", "headless-without-port", func(svc *api.Service) { + svc.Spec.Type = api.ServiceTypeClusterIP + svc.Spec.ClusterIP = api.ClusterIPNone + }), ) // Headless service should be ignored - hcPorts, staleUDPServices := updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result := updateServiceMap(fp.serviceMap, &fp.serviceChanges) if len(fp.serviceMap) != 0 { t.Errorf("expected service map length 0, got %d", len(fp.serviceMap)) } // No proxied services, so no healthchecks - if len(hcPorts) != 0 { - t.Errorf("expected healthcheck ports length 0, got %d", len(hcPorts)) + if len(result.hcServices) != 0 { + t.Errorf("expected healthcheck ports length 0, got %d", len(result.hcServices)) } - if len(staleUDPServices) != 0 { - t.Errorf("expected stale UDP services length 0, got %d", len(staleUDPServices)) + if len(result.staleServices) != 0 { + 
t.Errorf("expected stale UDP services length 0, got %d", len(result.staleServices)) } } @@ -1185,16 +1189,16 @@ func TestBuildServiceMapServiceTypeExternalName(t *testing.T) { }), ) - hcPorts, staleUDPServices := updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result := updateServiceMap(fp.serviceMap, &fp.serviceChanges) if len(fp.serviceMap) != 0 { t.Errorf("expected service map length 0, got %v", fp.serviceMap) } // No proxied services, so no healthchecks - if len(hcPorts) != 0 { - t.Errorf("expected healthcheck ports length 0, got %v", hcPorts) + if len(result.hcServices) != 0 { + t.Errorf("expected healthcheck ports length 0, got %v", result.hcServices) } - if len(staleUDPServices) != 0 { - t.Errorf("expected stale UDP services length 0, got %v", staleUDPServices) + if len(result.staleServices) != 0 { + t.Errorf("expected stale UDP services length 0, got %v", result.staleServices) } } @@ -1227,57 +1231,57 @@ func TestBuildServiceMapServiceUpdate(t *testing.T) { fp.OnServiceAdd(servicev1) - hcPorts, staleUDPServices := updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result := updateServiceMap(fp.serviceMap, &fp.serviceChanges) if len(fp.serviceMap) != 2 { t.Errorf("expected service map length 2, got %v", fp.serviceMap) } - if len(hcPorts) != 0 { - t.Errorf("expected healthcheck ports length 0, got %v", hcPorts) + if len(result.hcServices) != 0 { + t.Errorf("expected healthcheck ports length 0, got %v", result.hcServices) } - if len(staleUDPServices) != 0 { + if len(result.staleServices) != 0 { // Services only added, so nothing stale yet - t.Errorf("expected stale UDP services length 0, got %d", len(staleUDPServices)) + t.Errorf("expected stale UDP services length 0, got %d", len(result.staleServices)) } // Change service to load-balancer fp.OnServiceUpdate(servicev1, servicev2) - hcPorts, staleUDPServices = updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result = updateServiceMap(fp.serviceMap, &fp.serviceChanges) if len(fp.serviceMap) != 2 { 
t.Errorf("expected service map length 2, got %v", fp.serviceMap) } - if len(hcPorts) != 1 { - t.Errorf("expected healthcheck ports length 1, got %v", hcPorts) + if len(result.hcServices) != 1 { + t.Errorf("expected healthcheck ports length 1, got %v", result.hcServices) } - if len(staleUDPServices) != 0 { - t.Errorf("expected stale UDP services length 0, got %v", staleUDPServices.List()) + if len(result.staleServices) != 0 { + t.Errorf("expected stale UDP services length 0, got %v", result.staleServices.List()) } // No change; make sure the service map stays the same and there are // no health-check changes fp.OnServiceUpdate(servicev2, servicev2) - hcPorts, staleUDPServices = updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result = updateServiceMap(fp.serviceMap, &fp.serviceChanges) if len(fp.serviceMap) != 2 { t.Errorf("expected service map length 2, got %v", fp.serviceMap) } - if len(hcPorts) != 1 { - t.Errorf("expected healthcheck ports length 1, got %v", hcPorts) + if len(result.hcServices) != 1 { + t.Errorf("expected healthcheck ports length 1, got %v", result.hcServices) } - if len(staleUDPServices) != 0 { - t.Errorf("expected stale UDP services length 0, got %v", staleUDPServices.List()) + if len(result.staleServices) != 0 { + t.Errorf("expected stale UDP services length 0, got %v", result.staleServices.List()) } // And back to ClusterIP fp.OnServiceUpdate(servicev2, servicev1) - hcPorts, staleUDPServices = updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result = updateServiceMap(fp.serviceMap, &fp.serviceChanges) if len(fp.serviceMap) != 2 { t.Errorf("expected service map length 2, got %v", fp.serviceMap) } - if len(hcPorts) != 0 { - t.Errorf("expected healthcheck ports length 0, got %v", hcPorts) + if len(result.hcServices) != 0 { + t.Errorf("expected healthcheck ports length 0, got %v", result.hcServices) } - if len(staleUDPServices) != 0 { + if len(result.staleServices) != 0 { // Services only added, so nothing stale yet - t.Errorf("expected 
stale UDP services length 0, got %d", len(staleUDPServices)) + t.Errorf("expected stale UDP services length 0, got %d", len(result.staleServices)) } } @@ -1606,6 +1610,9 @@ func compareEndpointsMaps(t *testing.T, tci int, newMap, expected map[proxy.Serv func Test_updateEndpointsMap(t *testing.T) { var nodeName = testHostname + emptyEndpoint := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{} + } unnamedPort := func(ept *api.Endpoints) { ept.Subsets = []api.EndpointSubset{{ Addresses: []api.EndpointAddress{{ @@ -1910,18 +1917,20 @@ func Test_updateEndpointsMap(t *testing.T) { // previousEndpoints and currentEndpoints are used to call appropriate // handlers OnEndpoints* (based on whether corresponding values are nil // or non-nil) and must be of equal length. - previousEndpoints []*api.Endpoints - currentEndpoints []*api.Endpoints - oldEndpoints map[proxy.ServicePortName][]*endpointsInfo - expectedResult map[proxy.ServicePortName][]*endpointsInfo - expectedStale []endpointServicePair - expectedHealthchecks map[types.NamespacedName]int + previousEndpoints []*api.Endpoints + currentEndpoints []*api.Endpoints + oldEndpoints map[proxy.ServicePortName][]*endpointsInfo + expectedResult map[proxy.ServicePortName][]*endpointsInfo + expectedStaleEndpoints []endpointServicePair + expectedStaleServiceNames map[proxy.ServicePortName]bool + expectedHealthchecks map[types.NamespacedName]int }{{ // Case[0]: nothing - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{}, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{}, - expectedStale: []endpointServicePair{}, - expectedHealthchecks: map[types.NamespacedName]int{}, + oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{}, + expectedResult: map[proxy.ServicePortName][]*endpointsInfo{}, + expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, + expectedHealthchecks: map[types.NamespacedName]int{}, }, { // Case[1]: no change, unnamed 
port previousEndpoints: []*api.Endpoints{ @@ -1940,8 +1949,9 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedStale: []endpointServicePair{}, - expectedHealthchecks: map[types.NamespacedName]int{}, + expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, + expectedHealthchecks: map[types.NamespacedName]int{}, }, { // Case[2]: no change, named port, local previousEndpoints: []*api.Endpoints{ @@ -1960,7 +1970,8 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: true}, }, }, - expectedStale: []endpointServicePair{}, + expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{ makeNSN("ns1", "ep1"): 1, }, @@ -1988,8 +1999,9 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.2:12", isLocal: false}, }, }, - expectedStale: []endpointServicePair{}, - expectedHealthchecks: map[types.NamespacedName]int{}, + expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, + expectedHealthchecks: map[types.NamespacedName]int{}, }, { // Case[4]: no change, multiple subsets, multiple ports, local previousEndpoints: []*api.Endpoints{ @@ -2020,7 +2032,8 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.3:13", isLocal: false}, }, }, - expectedStale: []endpointServicePair{}, + expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{ makeNSN("ns1", "ep1"): 1, }, @@ -2086,7 +2099,8 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "2.2.2.2:22", isLocal: true}, }, }, - expectedStale: []endpointServicePair{}, + expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: 
map[types.NamespacedName]int{ makeNSN("ns1", "ep1"): 2, makeNSN("ns2", "ep2"): 1, @@ -2105,7 +2119,10 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: true}, }, }, - expectedStale: []endpointServicePair{}, + expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleServiceNames: map[proxy.ServicePortName]bool{ + makeServicePortName("ns1", "ep1", ""): true, + }, expectedHealthchecks: map[types.NamespacedName]int{ makeNSN("ns1", "ep1"): 1, }, @@ -2123,11 +2140,12 @@ func Test_updateEndpointsMap(t *testing.T) { }, }, expectedResult: map[proxy.ServicePortName][]*endpointsInfo{}, - expectedStale: []endpointServicePair{{ + expectedStaleEndpoints: []endpointServicePair{{ endpoint: "1.1.1.1:11", servicePortName: makeServicePortName("ns1", "ep1", ""), }}, - expectedHealthchecks: map[types.NamespacedName]int{}, + expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, + expectedHealthchecks: map[types.NamespacedName]int{}, }, { // Case[8]: add an IP and port previousEndpoints: []*api.Endpoints{ @@ -2151,7 +2169,10 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.2:12", isLocal: true}, }, }, - expectedStale: []endpointServicePair{}, + expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleServiceNames: map[proxy.ServicePortName]bool{ + makeServicePortName("ns1", "ep1", "p12"): true, + }, expectedHealthchecks: map[types.NamespacedName]int{ makeNSN("ns1", "ep1"): 1, }, @@ -2178,7 +2199,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedStale: []endpointServicePair{{ + expectedStaleEndpoints: []endpointServicePair{{ endpoint: "1.1.1.2:11", servicePortName: makeServicePortName("ns1", "ep1", "p11"), }, { @@ -2188,7 +2209,8 @@ func Test_updateEndpointsMap(t *testing.T) { endpoint: "1.1.1.2:12", servicePortName: makeServicePortName("ns1", "ep1", "p12"), }}, - expectedHealthchecks: map[types.NamespacedName]int{}, + expectedStaleServiceNames: 
map[proxy.ServicePortName]bool{}, + expectedHealthchecks: map[types.NamespacedName]int{}, }, { // Case[10]: add a subset previousEndpoints: []*api.Endpoints{ @@ -2210,7 +2232,10 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.2:12", isLocal: true}, }, }, - expectedStale: []endpointServicePair{}, + expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleServiceNames: map[proxy.ServicePortName]bool{ + makeServicePortName("ns1", "ep1", "p12"): true, + }, expectedHealthchecks: map[types.NamespacedName]int{ makeNSN("ns1", "ep1"): 1, }, @@ -2235,11 +2260,12 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedStale: []endpointServicePair{{ + expectedStaleEndpoints: []endpointServicePair{{ endpoint: "1.1.1.2:12", servicePortName: makeServicePortName("ns1", "ep1", "p12"), }}, - expectedHealthchecks: map[types.NamespacedName]int{}, + expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, + expectedHealthchecks: map[types.NamespacedName]int{}, }, { // Case[12]: rename a port previousEndpoints: []*api.Endpoints{ @@ -2258,10 +2284,13 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedStale: []endpointServicePair{{ + expectedStaleEndpoints: []endpointServicePair{{ endpoint: "1.1.1.1:11", servicePortName: makeServicePortName("ns1", "ep1", "p11"), }}, + expectedStaleServiceNames: map[proxy.ServicePortName]bool{ + makeServicePortName("ns1", "ep1", "p11-2"): true, + }, expectedHealthchecks: map[types.NamespacedName]int{}, }, { // Case[13]: renumber a port @@ -2281,11 +2310,12 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:22", isLocal: false}, }, }, - expectedStale: []endpointServicePair{{ + expectedStaleEndpoints: []endpointServicePair{{ endpoint: "1.1.1.1:11", servicePortName: makeServicePortName("ns1", "ep1", "p11"), }}, - expectedHealthchecks: map[types.NamespacedName]int{}, + expectedStaleServiceNames: 
map[proxy.ServicePortName]bool{}, + expectedHealthchecks: map[types.NamespacedName]int{}, }, { // Case[14]: complex add and remove previousEndpoints: []*api.Endpoints{ @@ -2337,7 +2367,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "4.4.4.4:44", isLocal: true}, }, }, - expectedStale: []endpointServicePair{{ + expectedStaleEndpoints: []endpointServicePair{{ endpoint: "2.2.2.2:22", servicePortName: makeServicePortName("ns2", "ep2", "p22"), }, { @@ -2353,10 +2383,35 @@ func Test_updateEndpointsMap(t *testing.T) { endpoint: "4.4.4.6:45", servicePortName: makeServicePortName("ns4", "ep4", "p45"), }}, + expectedStaleServiceNames: map[proxy.ServicePortName]bool{ + makeServicePortName("ns1", "ep1", "p12"): true, + makeServicePortName("ns1", "ep1", "p122"): true, + makeServicePortName("ns3", "ep3", "p33"): true, + }, expectedHealthchecks: map[types.NamespacedName]int{ makeNSN("ns4", "ep4"): 1, }, - }} + }, { + // Case[15]: change from 0 endpoint address to 1 unnamed port + previousEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", emptyEndpoint), + }, + currentEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", unnamedPort), + }, + oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{}, + expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ + makeServicePortName("ns1", "ep1", ""): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + }, + expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleServiceNames: map[proxy.ServicePortName]bool{ + makeServicePortName("ns1", "ep1", ""): true, + }, + expectedHealthchecks: map[types.NamespacedName]int{}, + }, + } for tci, tc := range testCases { ipt := iptablestest.NewFake() @@ -2390,19 +2445,27 @@ func Test_updateEndpointsMap(t *testing.T) { fp.OnEndpointsUpdate(prev, curr) } } - hcEndpoints, stale := updateEndpointsMap(fp.endpointsMap, &fp.endpointsChanges, fp.hostname) + result := updateEndpointsMap(fp.endpointsMap, &fp.endpointsChanges, fp.hostname) newMap := fp.endpointsMap 
compareEndpointsMaps(t, tci, newMap, tc.expectedResult) - if len(stale) != len(tc.expectedStale) { - t.Errorf("[%d] expected %d stale, got %d: %v", tci, len(tc.expectedStale), len(stale), stale) + if len(result.staleEndpoints) != len(tc.expectedStaleEndpoints) { + t.Errorf("[%d] expected %d staleEndpoints, got %d: %v", tci, len(tc.expectedStaleEndpoints), len(result.staleEndpoints), result.staleEndpoints) } - for _, x := range tc.expectedStale { - if stale[x] != true { - t.Errorf("[%d] expected stale[%v], but didn't find it: %v", tci, x, stale) + for _, x := range tc.expectedStaleEndpoints { + if result.staleEndpoints[x] != true { + t.Errorf("[%d] expected staleEndpoints[%v], but didn't find it: %v", tci, x, result.staleEndpoints) } } - if !reflect.DeepEqual(hcEndpoints, tc.expectedHealthchecks) { - t.Errorf("[%d] expected healthchecks %v, got %v", tci, tc.expectedHealthchecks, hcEndpoints) + if len(result.staleServiceNames) != len(tc.expectedStaleServiceNames) { + t.Errorf("[%d] expected %d staleServiceNames, got %d: %v", tci, len(tc.expectedStaleServiceNames), len(result.staleServiceNames), result.staleServiceNames) + } + for svcName := range tc.expectedStaleServiceNames { + if result.staleServiceNames[svcName] != true { + t.Errorf("[%d] expected staleServiceNames[%v], but didn't find it: %v", tci, svcName, result.staleServiceNames) + } + } + if !reflect.DeepEqual(result.hcEndpoints, tc.expectedHealthchecks) { + t.Errorf("[%d] expected healthchecks %v, got %v", tci, tc.expectedHealthchecks, result.hcEndpoints) } } } diff --git a/pkg/proxy/userspace/OWNERS b/pkg/proxy/userspace/OWNERS index 192b2a2b708..4988f45c4dd 100755 --- a/pkg/proxy/userspace/OWNERS +++ b/pkg/proxy/userspace/OWNERS @@ -3,4 +3,3 @@ reviewers: - lavalamp - smarterclayton - freehan -- bprashanth diff --git a/pkg/proxy/userspace/proxier.go b/pkg/proxy/userspace/proxier.go index 5032577b81f..5224d45d4b8 100644 --- a/pkg/proxy/userspace/proxier.go +++ b/pkg/proxy/userspace/proxier.go @@ -166,9 
+166,15 @@ func NewCustomProxier(loadBalancer LoadBalancer, listenIP net.IP, iptables iptab return nil, ErrProxyOnLocalhost } - hostIP, err := utilnet.ChooseHostInterface() - if err != nil { - return nil, fmt.Errorf("failed to select a host interface: %v", err) + // If listenIP is given, assume that is the intended host IP. Otherwise + // try to find a suitable host IP address from network interfaces. + var err error + hostIP := listenIP + if hostIP.Equal(net.IPv4zero) { + hostIP, err = utilnet.ChooseHostInterface() + if err != nil { + return nil, fmt.Errorf("failed to select a host interface: %v", err) + } } err = setRLimit(64 * 1000) diff --git a/pkg/proxy/userspace/roundrobin.go b/pkg/proxy/userspace/roundrobin.go index 6190f6013b7..b610ebbbd76 100644 --- a/pkg/proxy/userspace/roundrobin.go +++ b/pkg/proxy/userspace/roundrobin.go @@ -326,16 +326,23 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoin for portname := range oldPortsToEndpoints { svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: oldEndpoints.Namespace, Name: oldEndpoints.Name}, Port: portname} if _, exists := registeredEndpoints[svcPort]; !exists { - glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort) - // Reset but don't delete. - state := lb.services[svcPort] - state.endpoints = []string{} - state.index = 0 - state.affinity.affinityMap = map[string]*affinityState{} + lb.resetService(svcPort) } } } +func (lb *LoadBalancerRR) resetService(svcPort proxy.ServicePortName) { + // If the service is still around, reset but don't delete. 
+ if state, ok := lb.services[svcPort]; ok { + if len(state.endpoints) > 0 { + glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort) + state.endpoints = []string{} + } + state.index = 0 + state.affinity.affinityMap = map[string]*affinityState{} + } +} + func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *api.Endpoints) { portsToEndpoints := buildPortsToEndpointsMap(endpoints) @@ -344,13 +351,7 @@ func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *api.Endpoints) { for portname := range portsToEndpoints { svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname} - glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort) - // If the service is still around, reset but don't delete. - if state, ok := lb.services[svcPort]; ok { - state.endpoints = []string{} - state.index = 0 - state.affinity.affinityMap = map[string]*affinityState{} - } + lb.resetService(svcPort) } } diff --git a/pkg/proxy/winuserspace/proxysocket.go b/pkg/proxy/winuserspace/proxysocket.go index d5f1e26b396..08bc0c25a6d 100644 --- a/pkg/proxy/winuserspace/proxysocket.go +++ b/pkg/proxy/winuserspace/proxysocket.go @@ -281,7 +281,7 @@ func appendDNSSuffix(msg *dns.Msg, buffer []byte, length int, dnsSuffix string) msg.Question[0].Name = origName if err != nil { - glog.Warning("Unable to pack DNS packet. Error is: %v", err) + glog.Warningf("Unable to pack DNS packet. Error is: %v", err) return length, err } @@ -308,7 +308,7 @@ func recoverDNSQuestion(origName string, msg *dns.Msg, buffer []byte, length int mbuf, err := msg.PackBuffer(buffer) if err != nil { - glog.Warning("Unable to pack DNS packet. Error is: %v", err) + glog.Warningf("Unable to pack DNS packet. 
Error is: %v", err) return length, err } @@ -419,7 +419,7 @@ func processDNSQueryPacket( dnsSearch []string) (int, error) { msg := &dns.Msg{} if err := msg.Unpack(buffer[:length]); err != nil { - glog.Warning("Unable to unpack DNS packet. Error is: %v", err) + glog.Warningf("Unable to unpack DNS packet. Error is: %v", err) return length, err } @@ -466,7 +466,7 @@ func processDNSResponsePacket( var drop bool msg := &dns.Msg{} if err := msg.Unpack(buffer[:length]); err != nil { - glog.Warning("Unable to unpack DNS packet. Error is: %v", err) + glog.Warningf("Unable to unpack DNS packet. Error is: %v", err) return drop, length, err } diff --git a/pkg/registry/BUILD b/pkg/registry/BUILD index 77b76f2c059..e401174a2f3 100644 --- a/pkg/registry/BUILD +++ b/pkg/registry/BUILD @@ -71,8 +71,6 @@ filegroup( "//pkg/registry/extensions/podsecuritypolicy:all-srcs", "//pkg/registry/extensions/replicaset:all-srcs", "//pkg/registry/extensions/rest:all-srcs", - "//pkg/registry/extensions/thirdpartyresource:all-srcs", - "//pkg/registry/extensions/thirdpartyresourcedata:all-srcs", "//pkg/registry/networking/networkpolicy:all-srcs", "//pkg/registry/networking/rest:all-srcs", "//pkg/registry/policy/poddisruptionbudget:all-srcs", diff --git a/pkg/registry/OWNERS b/pkg/registry/OWNERS index 382a0c62805..978192dcac3 100644 --- a/pkg/registry/OWNERS +++ b/pkg/registry/OWNERS @@ -15,7 +15,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - davidopp @@ -29,7 +28,7 @@ reviewers: - pwittrock - roberthbailey - ncdc -- timstclair +- tallclair - eparis - mwielgus - timothysc diff --git a/pkg/registry/cachesize/OWNERS b/pkg/registry/cachesize/OWNERS index ab0dcba030e..aea0b7693d9 100755 --- a/pkg/registry/cachesize/OWNERS +++ b/pkg/registry/cachesize/OWNERS @@ -1,6 +1,5 @@ reviewers: - wojtek-t -- bprashanth - gmarek - soltysh - madhusudancs diff --git a/pkg/registry/core/event/BUILD b/pkg/registry/core/event/BUILD index 91c0da283d7..4a9fd1b4daa 100644 --- 
a/pkg/registry/core/event/BUILD +++ b/pkg/registry/core/event/BUILD @@ -24,6 +24,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library", ], diff --git a/pkg/registry/core/event/strategy.go b/pkg/registry/core/event/strategy.go index 77ea252b482..e70518af04b 100644 --- a/pkg/registry/core/event/strategy.go +++ b/pkg/registry/core/event/strategy.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/generic" + "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/names" "k8s.io/kubernetes/pkg/api" @@ -40,6 +41,10 @@ type eventStrategy struct { // Event objects via the REST API. 
var Strategy = eventStrategy{api.Scheme, names.SimpleNameGenerator} +func (eventStrategy) DefaultGarbageCollectionPolicy() rest.GarbageCollectionPolicy { + return rest.Unsupported +} + func (eventStrategy) NamespaceScoped() bool { return true } diff --git a/pkg/registry/core/node/rest/BUILD b/pkg/registry/core/node/rest/BUILD index 9fa78afc5db..96593785340 100644 --- a/pkg/registry/core/node/rest/BUILD +++ b/pkg/registry/core/node/rest/BUILD @@ -17,9 +17,9 @@ go_library( "//pkg/kubelet/client:go_default_library", "//pkg/registry/core/node:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/proxy:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/generic/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", ], ) diff --git a/pkg/registry/core/node/rest/proxy.go b/pkg/registry/core/node/rest/proxy.go index adea1b4b1be..7535acade1a 100644 --- a/pkg/registry/core/node/rest/proxy.go +++ b/pkg/registry/core/node/rest/proxy.go @@ -23,9 +23,9 @@ import ( "path" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/proxy" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" - genericrest "k8s.io/apiserver/pkg/registry/generic/rest" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/capabilities" @@ -75,8 +75,8 @@ func (r *ProxyREST) Connect(ctx genericapirequest.Context, id string, opts runti return newThrottledUpgradeAwareProxyHandler(location, transport, true, false, responder), nil } -func newThrottledUpgradeAwareProxyHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired bool, responder rest.Responder) *genericrest.UpgradeAwareProxyHandler { - handler := 
genericrest.NewUpgradeAwareProxyHandler(location, transport, wrapTransport, upgradeRequired, responder) +func newThrottledUpgradeAwareProxyHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired bool, responder rest.Responder) *proxy.UpgradeAwareHandler { + handler := proxy.NewUpgradeAwareHandler(location, transport, wrapTransport, upgradeRequired, responder) handler.MaxBytesPerSec = capabilities.Get().PerConnectionBandwidthLimitBytesPerSec return handler } diff --git a/pkg/registry/core/pod/rest/BUILD b/pkg/registry/core/pod/rest/BUILD index 79a450de886..ffce1b67636 100644 --- a/pkg/registry/core/pod/rest/BUILD +++ b/pkg/registry/core/pod/rest/BUILD @@ -23,10 +23,13 @@ go_library( "//pkg/registry/core/pod:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/proxy:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", + "//vendor/k8s.io/apiserver/pkg/features:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) diff --git a/pkg/registry/core/pod/rest/subresources.go b/pkg/registry/core/pod/rest/subresources.go index e7fae10cd73..ec6f7ac4857 100644 --- a/pkg/registry/core/pod/rest/subresources.go +++ b/pkg/registry/core/pod/rest/subresources.go @@ -23,10 +23,12 @@ import ( "path" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/proxy" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + genericfeatures "k8s.io/apiserver/pkg/features" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" - genericrest "k8s.io/apiserver/pkg/registry/generic/rest" 
"k8s.io/apiserver/pkg/registry/rest" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/capabilities" "k8s.io/kubernetes/pkg/kubelet/client" @@ -189,9 +191,9 @@ func (r *PortForwardREST) Connect(ctx genericapirequest.Context, name string, op return newThrottledUpgradeAwareProxyHandler(location, transport, false, true, true, responder), nil } -func newThrottledUpgradeAwareProxyHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired, interceptRedirects bool, responder rest.Responder) *genericrest.UpgradeAwareProxyHandler { - handler := genericrest.NewUpgradeAwareProxyHandler(location, transport, wrapTransport, upgradeRequired, responder) - handler.InterceptRedirects = interceptRedirects +func newThrottledUpgradeAwareProxyHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired, interceptRedirects bool, responder rest.Responder) *proxy.UpgradeAwareHandler { + handler := proxy.NewUpgradeAwareHandler(location, transport, wrapTransport, upgradeRequired, responder) + handler.InterceptRedirects = interceptRedirects && utilfeature.DefaultFeatureGate.Enabled(genericfeatures.StreamingProxyRedirects) handler.MaxBytesPerSec = capabilities.Get().PerConnectionBandwidthLimitBytesPerSec return handler } diff --git a/pkg/registry/core/service/BUILD b/pkg/registry/core/service/BUILD index 96159cc2b40..ccadf03fb60 100644 --- a/pkg/registry/core/service/BUILD +++ b/pkg/registry/core/service/BUILD @@ -36,12 +36,12 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/proxy:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/generic/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library", diff --git a/pkg/registry/core/service/ipallocator/allocator.go b/pkg/registry/core/service/ipallocator/allocator.go index 7adb30101bb..fe25a30212c 100644 --- a/pkg/registry/core/service/ipallocator/allocator.go +++ b/pkg/registry/core/service/ipallocator/allocator.go @@ -33,6 +33,9 @@ type Interface interface { AllocateNext() (net.IP, error) Release(net.IP) error ForEach(func(net.IP)) + + // For testing + Has(ip net.IP) bool } var ( diff --git a/pkg/registry/core/service/portallocator/allocator.go b/pkg/registry/core/service/portallocator/allocator.go index 7badfe7ad4c..c73253f2dcb 100644 --- a/pkg/registry/core/service/portallocator/allocator.go +++ b/pkg/registry/core/service/portallocator/allocator.go @@ -34,6 +34,9 @@ type Interface interface { AllocateNext() (int, error) Release(int) error ForEach(func(int)) + + // For testing + Has(int) bool } var ( diff --git a/pkg/registry/core/service/portallocator/operation.go b/pkg/registry/core/service/portallocator/operation.go index 08d9d587815..00dd7f03cf4 100644 --- a/pkg/registry/core/service/portallocator/operation.go +++ b/pkg/registry/core/service/portallocator/operation.go @@ -28,7 +28,7 @@ package portallocator // ... 
// write(updatedOwner) /// op.Commit() -type portAllocationOperation struct { +type PortAllocationOperation struct { pa Interface allocated []int releaseDeferred []int @@ -36,8 +36,8 @@ type portAllocationOperation struct { } // Creates a portAllocationOperation, tracking a set of allocations & releases -func StartOperation(pa Interface) *portAllocationOperation { - op := &portAllocationOperation{} +func StartOperation(pa Interface) *PortAllocationOperation { + op := &PortAllocationOperation{} op.pa = pa op.allocated = []int{} op.releaseDeferred = []int{} @@ -46,14 +46,14 @@ func StartOperation(pa Interface) *portAllocationOperation { } // Will rollback unless marked as shouldRollback = false by a Commit(). Call from a defer block -func (op *portAllocationOperation) Finish() { +func (op *PortAllocationOperation) Finish() { if op.shouldRollback { op.Rollback() } } // (Try to) undo any operations we did -func (op *portAllocationOperation) Rollback() []error { +func (op *PortAllocationOperation) Rollback() []error { errors := []error{} for _, allocated := range op.allocated { @@ -72,7 +72,7 @@ func (op *portAllocationOperation) Rollback() []error { // (Try to) perform any deferred operations. 
// Note that even if this fails, we don't rollback; we always want to err on the side of over-allocation, // and Commit should be called _after_ the owner is written -func (op *portAllocationOperation) Commit() []error { +func (op *PortAllocationOperation) Commit() []error { errors := []error{} for _, release := range op.releaseDeferred { @@ -94,7 +94,7 @@ func (op *portAllocationOperation) Commit() []error { } // Allocates a port, and record it for future rollback -func (op *portAllocationOperation) Allocate(port int) error { +func (op *PortAllocationOperation) Allocate(port int) error { err := op.pa.Allocate(port) if err == nil { op.allocated = append(op.allocated, port) @@ -103,7 +103,7 @@ func (op *portAllocationOperation) Allocate(port int) error { } // Allocates a port, and record it for future rollback -func (op *portAllocationOperation) AllocateNext() (int, error) { +func (op *PortAllocationOperation) AllocateNext() (int, error) { port, err := op.pa.AllocateNext() if err == nil { op.allocated = append(op.allocated, port) @@ -112,6 +112,6 @@ func (op *portAllocationOperation) AllocateNext() (int, error) { } // Marks a port so that it will be released if this operation Commits -func (op *portAllocationOperation) ReleaseDeferred(port int) { +func (op *PortAllocationOperation) ReleaseDeferred(port int) { op.releaseDeferred = append(op.releaseDeferred, port) } diff --git a/pkg/registry/core/service/proxy.go b/pkg/registry/core/service/proxy.go index 7f5f503a396..7875e58a69a 100644 --- a/pkg/registry/core/service/proxy.go +++ b/pkg/registry/core/service/proxy.go @@ -23,8 +23,8 @@ import ( "path" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/proxy" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - genericrest "k8s.io/apiserver/pkg/registry/generic/rest" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/capabilities" @@ -71,8 +71,8 @@ func (r *ProxyREST) Connect(ctx genericapirequest.Context, id 
string, opts runti return newThrottledUpgradeAwareProxyHandler(location, transport, true, false, responder), nil } -func newThrottledUpgradeAwareProxyHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired bool, responder rest.Responder) *genericrest.UpgradeAwareProxyHandler { - handler := genericrest.NewUpgradeAwareProxyHandler(location, transport, wrapTransport, upgradeRequired, responder) +func newThrottledUpgradeAwareProxyHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired bool, responder rest.Responder) *proxy.UpgradeAwareHandler { + handler := proxy.NewUpgradeAwareHandler(location, transport, wrapTransport, upgradeRequired, responder) handler.MaxBytesPerSec = capabilities.Get().PerConnectionBandwidthLimitBytesPerSec return handler } diff --git a/pkg/registry/core/service/rest.go b/pkg/registry/core/service/rest.go index a369708579d..9195e9c5e5e 100644 --- a/pkg/registry/core/service/rest.go +++ b/pkg/registry/core/service/rest.go @@ -357,58 +357,45 @@ func (rs *REST) Update(ctx genericapirequest.Context, name string, objInfo rest. 
return nil, false, errors.NewInvalid(api.Kind("Service"), service.Name, errs) } + // TODO: this should probably move to strategy.PrepareForCreate() + releaseServiceIP := false + defer func() { + if releaseServiceIP { + if helper.IsServiceIPSet(service) { + rs.serviceIPs.Release(net.ParseIP(service.Spec.ClusterIP)) + } + } + }() + nodePortOp := portallocator.StartOperation(rs.serviceNodePorts) defer nodePortOp.Finish() - assignNodePorts := shouldAssignNodePorts(service) - - oldNodePorts := CollectServiceNodePorts(oldService) - - newNodePorts := []int{} - if assignNodePorts { - for i := range service.Spec.Ports { - servicePort := &service.Spec.Ports[i] - nodePort := int(servicePort.NodePort) - if nodePort != 0 { - if !contains(oldNodePorts, nodePort) { - err := nodePortOp.Allocate(nodePort) - if err != nil { - el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), nodePort, err.Error())} - return nil, false, errors.NewInvalid(api.Kind("Service"), service.Name, el) - } - } - } else { - nodePort, err = nodePortOp.AllocateNext() - if err != nil { - // TODO: what error should be returned here? It's not a - // field-level validation failure (the field is valid), and it's - // not really an internal error. - return nil, false, errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err)) - } - servicePort.NodePort = int32(nodePort) - } - // Detect duplicate node ports; this should have been caught by validation, so we panic - if contains(newNodePorts, nodePort) { - panic("duplicate node port") - } - newNodePorts = append(newNodePorts, nodePort) + // Update service from ExternalName to non-ExternalName, should initialize ClusterIP. 
+ if oldService.Spec.Type == api.ServiceTypeExternalName && service.Spec.Type != api.ServiceTypeExternalName { + if releaseServiceIP, err = rs.initClusterIP(service); err != nil { + return nil, false, err } - } else { - // Validate should have validated that nodePort == 0 } - - // The comparison loops are O(N^2), but we don't expect N to be huge - // (there's a hard-limit at 2^16, because they're ports; and even 4 ports would be a lot) - for _, oldNodePort := range oldNodePorts { - if contains(newNodePorts, oldNodePort) { - continue + // Update service from non-ExternalName to ExternalName, should release ClusterIP if exists. + if oldService.Spec.Type != api.ServiceTypeExternalName && service.Spec.Type == api.ServiceTypeExternalName { + if helper.IsServiceIPSet(oldService) { + rs.serviceIPs.Release(net.ParseIP(oldService.Spec.ClusterIP)) } - nodePortOp.ReleaseDeferred(oldNodePort) } - - // Remove any LoadBalancerStatus now if Type != LoadBalancer; - // although loadbalancer delete is actually asynchronous, we don't need to expose the user to that complexity. + // Update service from NodePort or LoadBalancer to ExternalName or ClusterIP, should release NodePort if exists. + if (oldService.Spec.Type == api.ServiceTypeNodePort || oldService.Spec.Type == api.ServiceTypeLoadBalancer) && + (service.Spec.Type == api.ServiceTypeExternalName || service.Spec.Type == api.ServiceTypeClusterIP) { + rs.releaseNodePort(oldService, nodePortOp) + } + // Update service from any type to NodePort or LoadBalancer, should update NodePort. + if service.Spec.Type == api.ServiceTypeNodePort || service.Spec.Type == api.ServiceTypeLoadBalancer { + if err := rs.updateNodePort(oldService, service, nodePortOp); err != nil { + return nil, false, err + } + } + // Update service from LoadBalancer to non-LoadBalancer, should remove any LoadBalancerStatus. 
if service.Spec.Type != api.ServiceTypeLoadBalancer { + // Although loadbalancer delete is actually asynchronous, we don't need to expose the user to that complexity. service.Status.LoadBalancer = api.LoadBalancerStatus{} } @@ -425,13 +412,14 @@ func (rs *REST) Update(ctx genericapirequest.Context, name string, objInfo rest. } out, err := rs.registry.UpdateService(ctx, service) - if err == nil { el := nodePortOp.Commit() if el != nil { // problems should be fixed by an eventual reconciliation / restart glog.Errorf("error(s) committing NodePorts changes: %v", el) } + + releaseServiceIP = false } return out, false, err @@ -570,3 +558,82 @@ func (rs *REST) allocateHealthCheckNodePort(service *api.Service) error { } return nil } + +// The return bool value indicates if a cluster IP is allocated successfully. +func (rs *REST) initClusterIP(service *api.Service) (bool, error) { + switch { + case service.Spec.ClusterIP == "": + // Allocate next available. + ip, err := rs.serviceIPs.AllocateNext() + if err != nil { + // TODO: what error should be returned here? It's not a + // field-level validation failure (the field is valid), and it's + // not really an internal error. + return false, errors.NewInternalError(fmt.Errorf("failed to allocate a serviceIP: %v", err)) + } + service.Spec.ClusterIP = ip.String() + return true, nil + case service.Spec.ClusterIP != api.ClusterIPNone && service.Spec.ClusterIP != "": + // Try to respect the requested IP. + if err := rs.serviceIPs.Allocate(net.ParseIP(service.Spec.ClusterIP)); err != nil { + // TODO: when validation becomes versioned, this gets more complicated. 
+ el := field.ErrorList{field.Invalid(field.NewPath("spec", "clusterIP"), service.Spec.ClusterIP, err.Error())} + return false, errors.NewInvalid(api.Kind("Service"), service.Name, el) + } + return true, nil + } + + return false, nil +} + +func (rs *REST) updateNodePort(oldService, newService *api.Service, nodePortOp *portallocator.PortAllocationOperation) error { + oldNodePorts := CollectServiceNodePorts(oldService) + + newNodePorts := []int{} + for i := range newService.Spec.Ports { + servicePort := &newService.Spec.Ports[i] + nodePort := int(servicePort.NodePort) + if nodePort != 0 { + if !contains(oldNodePorts, nodePort) { + err := nodePortOp.Allocate(nodePort) + if err != nil { + el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), nodePort, err.Error())} + return errors.NewInvalid(api.Kind("Service"), newService.Name, el) + } + } + } else { + nodePort, err := nodePortOp.AllocateNext() + if err != nil { + // TODO: what error should be returned here? It's not a + // field-level validation failure (the field is valid), and it's + // not really an internal error. 
+ return errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err)) + } + servicePort.NodePort = int32(nodePort) + } + // Detect duplicate node ports; this should have been caught by validation, so we panic + if contains(newNodePorts, nodePort) { + panic("duplicate node port") + } + newNodePorts = append(newNodePorts, nodePort) + } + + // The comparison loops are O(N^2), but we don't expect N to be huge + // (there's a hard-limit at 2^16, because they're ports; and even 4 ports would be a lot) + for _, oldNodePort := range oldNodePorts { + if contains(newNodePorts, oldNodePort) { + continue + } + nodePortOp.ReleaseDeferred(oldNodePort) + } + + return nil +} + +func (rs *REST) releaseNodePort(service *api.Service, nodePortOp *portallocator.PortAllocationOperation) { + nodePorts := CollectServiceNodePorts(service) + + for _, nodePort := range nodePorts { + nodePortOp.ReleaseDeferred(nodePort) + } +} diff --git a/pkg/registry/core/service/rest_test.go b/pkg/registry/core/service/rest_test.go index f6c1b1bed98..100eadb26c5 100644 --- a/pkg/registry/core/service/rest_test.go +++ b/pkg/registry/core/service/rest_test.go @@ -1276,3 +1276,269 @@ func TestServiceRegistryExternalTrafficAnnotationClusterIP(t *testing.T) { t.Errorf("Unexpected allocation of health check node port annotation %s", api.BetaAnnotationHealthCheckNodePort) } } + +func TestInitClusterIP(t *testing.T) { + storage, _ := NewTestREST(t, nil) + + testCases := []struct { + name string + svc *api.Service + expectClusterIP bool + }{ + { + name: "Allocate new ClusterIP", + svc: &api.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"bar": "baz"}, + SessionAffinity: api.ServiceAffinityNone, + Type: api.ServiceTypeClusterIP, + Ports: []api.ServicePort{{ + Port: 6502, + Protocol: api.ProtocolTCP, + TargetPort: intstr.FromInt(6502), + }}, + }, + }, + expectClusterIP: true, + }, + { + name: "Allocate specified ClusterIP", + svc: 
&api.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"bar": "baz"}, + SessionAffinity: api.ServiceAffinityNone, + Type: api.ServiceTypeClusterIP, + ClusterIP: "1.2.3.4", + Ports: []api.ServicePort{{ + Port: 6502, + Protocol: api.ProtocolTCP, + TargetPort: intstr.FromInt(6502), + }}, + }, + }, + expectClusterIP: true, + }, + { + name: "Shouldn't allocate ClusterIP", + svc: &api.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"bar": "baz"}, + SessionAffinity: api.ServiceAffinityNone, + Type: api.ServiceTypeClusterIP, + ClusterIP: api.ClusterIPNone, + Ports: []api.ServicePort{{ + Port: 6502, + Protocol: api.ProtocolTCP, + TargetPort: intstr.FromInt(6502), + }}, + }, + }, + expectClusterIP: false, + }, + } + + for _, test := range testCases { + hasAllocatedIP, err := storage.initClusterIP(test.svc) + if err != nil { + t.Errorf("%q: unexpected error: %v", test.name, err) + } + + if hasAllocatedIP != test.expectClusterIP { + t.Errorf("%q: expected %v, but got %v", test.name, test.expectClusterIP, hasAllocatedIP) + } + + if test.expectClusterIP { + if !storage.serviceIPs.Has(net.ParseIP(test.svc.Spec.ClusterIP)) { + t.Errorf("%q: unexpected ClusterIP %q, out of range", test.name, test.svc.Spec.ClusterIP) + } + } + + if test.name == "Allocate specified ClusterIP" && test.svc.Spec.ClusterIP != "1.2.3.4" { + t.Errorf("%q: expected ClusterIP %q, but got %q", test.name, "1.2.3.4", test.svc.Spec.ClusterIP) + } + } +} + +func TestUpdateNodePort(t *testing.T) { + storage, _ := NewTestREST(t, nil) + nodePortOp := portallocator.StartOperation(storage.serviceNodePorts) + defer nodePortOp.Finish() + + testCases := []struct { + name string + oldService *api.Service + newService *api.Service + expectSpecifiedNodePorts []int + }{ + { + name: "Old service and new service have the same NodePort", + oldService: &api.Service{ + ObjectMeta: metav1.ObjectMeta{Name: 
"foo"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"bar": "baz"}, + SessionAffinity: api.ServiceAffinityNone, + Type: api.ServiceTypeNodePort, + Ports: []api.ServicePort{{ + Port: 6502, + Protocol: api.ProtocolTCP, + TargetPort: intstr.FromInt(6502), + NodePort: 30053, + }}, + }, + }, + newService: &api.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"bar": "baz"}, + SessionAffinity: api.ServiceAffinityNone, + Type: api.ServiceTypeNodePort, + Ports: []api.ServicePort{{ + Port: 6502, + Protocol: api.ProtocolTCP, + TargetPort: intstr.FromInt(6502), + NodePort: 30053, + }}, + }, + }, + expectSpecifiedNodePorts: []int{30053}, + }, + { + name: "Old service has more NodePorts than new service has", + oldService: &api.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"bar": "baz"}, + SessionAffinity: api.ServiceAffinityNone, + Type: api.ServiceTypeNodePort, + Ports: []api.ServicePort{ + { + Name: "port-tcp", + Port: 53, + TargetPort: intstr.FromInt(6502), + Protocol: api.ProtocolTCP, + NodePort: 30053, + }, + { + Name: "port-udp", + Port: 53, + TargetPort: intstr.FromInt(6502), + Protocol: api.ProtocolUDP, + NodePort: 30053, + }, + }, + }, + }, + newService: &api.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"bar": "baz"}, + SessionAffinity: api.ServiceAffinityNone, + Type: api.ServiceTypeNodePort, + Ports: []api.ServicePort{ + { + Name: "port-tcp", + Port: 53, + TargetPort: intstr.FromInt(6502), + Protocol: api.ProtocolTCP, + NodePort: 30053, + }, + }, + }, + }, + expectSpecifiedNodePorts: []int{30053}, + }, + { + name: "Change protocol of ServicePort without changing NodePort", + oldService: &api.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"bar": "baz"}, + SessionAffinity: api.ServiceAffinityNone, + Type: 
api.ServiceTypeNodePort, + Ports: []api.ServicePort{ + { + Name: "port-tcp", + Port: 53, + TargetPort: intstr.FromInt(6502), + Protocol: api.ProtocolTCP, + NodePort: 30053, + }, + }, + }, + }, + newService: &api.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"bar": "baz"}, + SessionAffinity: api.ServiceAffinityNone, + Type: api.ServiceTypeNodePort, + Ports: []api.ServicePort{ + { + Name: "port-udp", + Port: 53, + TargetPort: intstr.FromInt(6502), + Protocol: api.ProtocolUDP, + NodePort: 30053, + }, + }, + }, + }, + expectSpecifiedNodePorts: []int{30053}, + }, + { + name: "Should allocate NodePort when changing service type to NodePort", + oldService: &api.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"bar": "baz"}, + SessionAffinity: api.ServiceAffinityNone, + Type: api.ServiceTypeClusterIP, + Ports: []api.ServicePort{{ + Port: 6502, + Protocol: api.ProtocolTCP, + TargetPort: intstr.FromInt(6502), + }}, + }, + }, + newService: &api.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"bar": "baz"}, + SessionAffinity: api.ServiceAffinityNone, + Type: api.ServiceTypeNodePort, + Ports: []api.ServicePort{{ + Port: 6502, + Protocol: api.ProtocolTCP, + TargetPort: intstr.FromInt(6502), + }}, + }, + }, + expectSpecifiedNodePorts: []int{}, + }, + } + + for _, test := range testCases { + err := storage.updateNodePort(test.oldService, test.newService, nodePortOp) + if err != nil { + t.Errorf("%q: unexpected error: %v", test.name, err) + continue + } + _ = nodePortOp.Commit() + + serviceNodePorts := CollectServiceNodePorts(test.newService) + + if len(test.expectSpecifiedNodePorts) == 0 { + for _, nodePort := range serviceNodePorts { + if !storage.serviceNodePorts.Has(nodePort) { + t.Errorf("%q: unexpected NodePort %d, out of range", test.name, nodePort) + } + } + } else if 
!reflect.DeepEqual(serviceNodePorts, test.expectSpecifiedNodePorts) { + t.Errorf("%q: expected NodePorts %v, but got %v", test.name, test.expectSpecifiedNodePorts, serviceNodePorts) + } + + } +} diff --git a/pkg/registry/extensions/rest/BUILD b/pkg/registry/extensions/rest/BUILD index 2b21c5dc1de..c88831d6bb5 100644 --- a/pkg/registry/extensions/rest/BUILD +++ b/pkg/registry/extensions/rest/BUILD @@ -5,33 +5,15 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = ["thirdparty_controller_test.go"], - library = ":go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/apis/extensions:go_default_library", - "//pkg/registry/extensions/thirdpartyresourcedata:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], ) go_library( name = "go_default_library", - srcs = [ - "storage_extensions.go", - "thirdparty_controller.go", - ], + srcs = ["storage_extensions.go"], tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", "//pkg/apis/extensions:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:go_default_library", "//pkg/registry/extensions/controller/storage:go_default_library", "//pkg/registry/extensions/daemonset/storage:go_default_library", "//pkg/registry/extensions/deployment/storage:go_default_library", @@ -39,15 +21,7 @@ go_library( "//pkg/registry/extensions/networkpolicy/storage:go_default_library", "//pkg/registry/extensions/podsecuritypolicy/storage:go_default_library", "//pkg/registry/extensions/replicaset/storage:go_default_library", - "//pkg/registry/extensions/thirdpartyresource/storage:go_default_library", - "//pkg/registry/extensions/thirdpartyresourcedata:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", 
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/server:go_default_library", diff --git a/pkg/registry/extensions/rest/storage_extensions.go b/pkg/registry/extensions/rest/storage_extensions.go index f674e94e920..674b2aab79e 100644 --- a/pkg/registry/extensions/rest/storage_extensions.go +++ b/pkg/registry/extensions/rest/storage_extensions.go @@ -17,21 +17,13 @@ limitations under the License. package rest import ( - "fmt" - "time" - - "github.com/golang/glog" - extensionsapiv1beta1 "k8s.io/api/extensions/v1beta1" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" - extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" expcontrollerstore "k8s.io/kubernetes/pkg/registry/extensions/controller/storage" daemonstore "k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage" deploymentstore "k8s.io/kubernetes/pkg/registry/extensions/deployment/storage" @@ -39,11 +31,9 @@ import ( networkpolicystore "k8s.io/kubernetes/pkg/registry/extensions/networkpolicy/storage" pspstore "k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage" replicasetstore "k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage" 
- thirdpartyresourcestore "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource/storage" ) type RESTStorageProvider struct { - ResourceInterface ResourceInterface } func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) { @@ -70,11 +60,6 @@ func (p RESTStorageProvider) v1beta1Storage(apiResourceConfigSource serverstorag storage["replicationcontrollers"] = controllerStorage.ReplicationController storage["replicationcontrollers/scale"] = controllerStorage.Scale - if apiResourceConfigSource.ResourceEnabled(version.WithResource("thirdpartyresources")) { - thirdPartyResourceStorage := thirdpartyresourcestore.NewREST(restOptionsGetter) - storage["thirdpartyresources"] = thirdPartyResourceStorage - } - if apiResourceConfigSource.ResourceEnabled(version.WithResource("daemonsets")) { daemonSetStorage, daemonSetStatusStorage := daemonstore.NewREST(restOptionsGetter) storage["daemonsets"] = daemonSetStorage @@ -110,29 +95,6 @@ func (p RESTStorageProvider) v1beta1Storage(apiResourceConfigSource serverstorag return storage } -func (p RESTStorageProvider) PostStartHook() (string, genericapiserver.PostStartHookFunc, error) { - return "extensions/third-party-resources", p.postStartHookFunc, nil -} -func (p RESTStorageProvider) postStartHookFunc(hookContext genericapiserver.PostStartHookContext) error { - clientset, err := extensionsclient.NewForConfig(hookContext.LoopbackClientConfig) - if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to initialize client: %v", err)) - return nil - } - - thirdPartyControl := ThirdPartyController{ - master: p.ResourceInterface, - client: clientset, - } - go wait.Forever(func() { - if err := thirdPartyControl.SyncResources(); err != nil { - glog.Warningf("third party resource sync failed: %v", err) - } - }, 10*time.Second) - - return nil -} - func (p RESTStorageProvider) GroupName() string { 
return extensions.GroupName } diff --git a/pkg/registry/extensions/rest/thirdparty_controller.go b/pkg/registry/extensions/rest/thirdparty_controller.go deleted file mode 100644 index 2db39b2376c..00000000000 --- a/pkg/registry/extensions/rest/thirdparty_controller.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rest - -import ( - "fmt" - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/apis/extensions" - extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" - "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata" -) - -// ResourceInterface is the interface for the parts of the master that know how to add/remove -// third party resources. Extracted into an interface for injection for testing. -type ResourceInterface interface { - // Remove a third party resource based on the RESTful path for that resource, the path is / - RemoveThirdPartyResource(path string) error - // Install a third party resource described by 'rsrc' - InstallThirdPartyResource(rsrc *extensions.ThirdPartyResource) error - // Is a particular third party resource currently installed? 
- HasThirdPartyResource(rsrc *extensions.ThirdPartyResource) (bool, error) - // List all currently installed third party resources, the returned - // names are of the form / - ListThirdPartyResources() []string -} - -const thirdpartyprefix = "/apis" - -// ThirdPartyController is a control loop that knows how to synchronize ThirdPartyResource objects with -// RESTful resources which are present in the API server. -type ThirdPartyController struct { - master ResourceInterface - client extensionsclient.ThirdPartyResourcesGetter -} - -// SyncOneResource synchronizes a single resource with RESTful resources on the master -func (t *ThirdPartyController) SyncOneResource(rsrc *extensions.ThirdPartyResource) error { - // TODO: we also need to test if the existing installed resource matches the resource we are sync-ing. - // Currently, if there is an older, incompatible resource installed, we won't remove it. We should detect - // older, incompatible resources and remove them before testing if the resource exists. 
- hasResource, err := t.master.HasThirdPartyResource(rsrc) - if err != nil { - return err - } - if !hasResource { - return t.master.InstallThirdPartyResource(rsrc) - } - return nil -} - -// Synchronize all resources with RESTful resources on the master -func (t *ThirdPartyController) SyncResources() error { - list, err := t.client.ThirdPartyResources().List(metav1.ListOptions{}) - if err != nil { - return err - } - return t.syncResourceList(list) -} - -func (t *ThirdPartyController) syncResourceList(list runtime.Object) error { - existing := sets.String{} - switch list := list.(type) { - case *extensions.ThirdPartyResourceList: - // Loop across all schema objects for third party resources - for ix := range list.Items { - item := &list.Items[ix] - // extract the api group and resource kind from the schema - _, group, err := thirdpartyresourcedata.ExtractApiGroupAndKind(item) - if err != nil { - return err - } - // place it in the set of resources that we expect, so that we don't delete it in the delete pass - existing.Insert(MakeThirdPartyPath(group)) - // ensure a RESTful resource for this schema exists on the master - if err := t.SyncOneResource(item); err != nil { - return err - } - } - default: - return fmt.Errorf("expected a *ThirdPartyResourceList, got %#v", list) - } - // deletion phase, get all installed RESTful resources - installed := t.master.ListThirdPartyResources() - for _, installedAPI := range installed { - found := false - // search across the expected restful resources to see if this resource belongs to one of the expected ones - for _, apiPath := range existing.List() { - if installedAPI == apiPath || strings.HasPrefix(installedAPI, apiPath+"/") { - found = true - break - } - } - // not expected, delete the resource - if !found { - if err := t.master.RemoveThirdPartyResource(installedAPI); err != nil { - return err - } - } - } - - return nil -} - -func MakeThirdPartyPath(group string) string { - if len(group) == 0 { - return thirdpartyprefix - } - 
return thirdpartyprefix + "/" + group -} - -func GetThirdPartyGroupName(path string) string { - return strings.TrimPrefix(strings.TrimPrefix(path, thirdpartyprefix), "/") -} diff --git a/pkg/registry/extensions/rest/thirdparty_controller_test.go b/pkg/registry/extensions/rest/thirdparty_controller_test.go deleted file mode 100644 index 74c0781057d..00000000000 --- a/pkg/registry/extensions/rest/thirdparty_controller_test.go +++ /dev/null @@ -1,177 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package rest - -import ( - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - expapi "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata" -) - -type FakeAPIInterface struct { - removed []string - installed []*expapi.ThirdPartyResource - apis []string - t *testing.T -} - -func (f *FakeAPIInterface) RemoveThirdPartyResource(path string) error { - f.removed = append(f.removed, path) - return nil -} - -func (f *FakeAPIInterface) InstallThirdPartyResource(rsrc *expapi.ThirdPartyResource) error { - f.installed = append(f.installed, rsrc) - _, group, _ := thirdpartyresourcedata.ExtractApiGroupAndKind(rsrc) - f.apis = append(f.apis, MakeThirdPartyPath(group)) - return nil -} - -func (f *FakeAPIInterface) HasThirdPartyResource(rsrc *expapi.ThirdPartyResource) (bool, error) { - if f.apis == nil { - return false, nil - } - _, group, _ := thirdpartyresourcedata.ExtractApiGroupAndKind(rsrc) - path := MakeThirdPartyPath(group) - for _, api := range f.apis { - if api == path { - return true, nil - } - } - return false, nil -} - -func (f *FakeAPIInterface) ListThirdPartyResources() []string { - return f.apis -} - -func TestSyncAPIs(t *testing.T) { - resourcesNamed := func(names ...string) []expapi.ThirdPartyResource { - result := []expapi.ThirdPartyResource{} - for _, name := range names { - result = append(result, expapi.ThirdPartyResource{ObjectMeta: metav1.ObjectMeta{Name: name}}) - } - return result - } - - tests := []struct { - list *expapi.ThirdPartyResourceList - apis []string - expectedInstalled []string - expectedRemoved []string - name string - }{ - { - list: &expapi.ThirdPartyResourceList{ - Items: resourcesNamed("foo.example.com"), - }, - expectedInstalled: []string{"foo.example.com"}, - name: "simple add", - }, - { - list: &expapi.ThirdPartyResourceList{ - Items: resourcesNamed("foo.example.com"), - }, - apis: []string{ - "/apis/example.com", - 
"/apis/example.com/v1", - }, - name: "does nothing", - }, - { - list: &expapi.ThirdPartyResourceList{ - Items: resourcesNamed("foo.example.com"), - }, - apis: []string{ - "/apis/example.com", - "/apis/example.com/v1", - "/apis/example.co", - "/apis/example.co/v1", - }, - name: "deletes substring API", - expectedRemoved: []string{ - "/apis/example.co", - "/apis/example.co/v1", - }, - }, - { - list: &expapi.ThirdPartyResourceList{ - Items: resourcesNamed("foo.example.com", "foo.company.com"), - }, - apis: []string{ - "/apis/company.com", - "/apis/company.com/v1", - }, - expectedInstalled: []string{"foo.example.com"}, - name: "adds with existing", - }, - { - list: &expapi.ThirdPartyResourceList{ - Items: resourcesNamed("foo.example.com"), - }, - apis: []string{ - "/apis/company.com", - "/apis/company.com/v1", - }, - expectedInstalled: []string{"foo.example.com"}, - expectedRemoved: []string{"/apis/company.com", "/apis/company.com/v1"}, - name: "removes with existing", - }, - } - - for _, test := range tests { - fake := FakeAPIInterface{ - apis: test.apis, - t: t, - } - - cntrl := ThirdPartyController{master: &fake} - - if err := cntrl.syncResourceList(test.list); err != nil { - t.Errorf("[%s] unexpected error: %v", test.name, err) - } - if len(test.expectedInstalled) != len(fake.installed) { - t.Errorf("[%s] unexpected installed APIs: %d, expected %d (%#v)", test.name, len(fake.installed), len(test.expectedInstalled), fake.installed[0]) - continue - } else { - names := sets.String{} - for ix := range fake.installed { - names.Insert(fake.installed[ix].Name) - } - for _, name := range test.expectedInstalled { - if !names.Has(name) { - t.Errorf("[%s] missing installed API: %s", test.name, name) - } - } - } - if len(test.expectedRemoved) != len(fake.removed) { - t.Errorf("[%s] unexpected installed APIs: %d, expected %d", test.name, len(fake.removed), len(test.expectedRemoved)) - continue - } else { - names := sets.String{} - names.Insert(fake.removed...) 
- for _, name := range test.expectedRemoved { - if !names.Has(name) { - t.Errorf("[%s] missing removed API: %s (%s)", test.name, name, names) - } - } - } - } -} diff --git a/pkg/registry/extensions/thirdpartyresource/BUILD b/pkg/registry/extensions/thirdpartyresource/BUILD deleted file mode 100644 index 177124b8c44..00000000000 --- a/pkg/registry/extensions/thirdpartyresource/BUILD +++ /dev/null @@ -1,59 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "strategy.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/apis/extensions/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["strategy_test.go"], - library = ":go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/api/testing:go_default_library", - "//pkg/apis/extensions:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/registry/extensions/thirdpartyresource/storage:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/pkg/registry/extensions/thirdpartyresource/doc.go 
b/pkg/registry/extensions/thirdpartyresource/doc.go deleted file mode 100644 index 5cfaf25637b..00000000000 --- a/pkg/registry/extensions/thirdpartyresource/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package thirdpartyresource provides Registry interface and its REST -// implementation for storing ThirdPartyResource api objects. -package thirdpartyresource // import "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource" diff --git a/pkg/registry/extensions/thirdpartyresource/storage/BUILD b/pkg/registry/extensions/thirdpartyresource/storage/BUILD deleted file mode 100644 index 070d0db0706..00000000000 --- a/pkg/registry/extensions/thirdpartyresource/storage/BUILD +++ /dev/null @@ -1,55 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = ["storage_test.go"], - library = ":go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/apis/extensions:go_default_library", - "//pkg/registry/registrytest:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = ["storage.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/registry/cachesize:go_default_library", - "//pkg/registry/extensions/thirdpartyresource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/pkg/registry/extensions/thirdpartyresource/storage/storage.go b/pkg/registry/extensions/thirdpartyresource/storage/storage.go deleted file mode 100644 index 01c98ff6ffe..00000000000 --- a/pkg/registry/extensions/thirdpartyresource/storage/storage.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package storage - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apiserver/pkg/registry/generic" - genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/registry/cachesize" - "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource" -) - -// REST implements a RESTStorage for ThirdPartyResources -type REST struct { - *genericregistry.Store -} - -// NewREST returns a registry which will store ThirdPartyResource in the given helper -func NewREST(optsGetter generic.RESTOptionsGetter) *REST { - resource := extensions.Resource("thirdpartyresources") - opts, err := optsGetter.GetRESTOptions(resource) - if err != nil { - panic(err) // TODO: Propagate error up - } - - // We explicitly do NOT do any decoration here yet. // TODO determine why we do not want to cache here - opts.Decorator = generic.UndecoratedStorage - - store := &genericregistry.Store{ - Copier: api.Scheme, - NewFunc: func() runtime.Object { return &extensions.ThirdPartyResource{} }, - NewListFunc: func() runtime.Object { return &extensions.ThirdPartyResourceList{} }, - PredicateFunc: thirdpartyresource.Matcher, - QualifiedResource: resource, - WatchCacheSize: cachesize.GetWatchCacheSizeByResource(resource.Resource), - - CreateStrategy: thirdpartyresource.Strategy, - UpdateStrategy: thirdpartyresource.Strategy, - DeleteStrategy: thirdpartyresource.Strategy, - } - options := &generic.StoreOptions{RESTOptions: opts, AttrFunc: thirdpartyresource.GetAttrs} // Pass in opts to use UndecoratedStorage - if err := store.CompleteWithOptions(options); err != nil { - panic(err) // TODO: Propagate error up - } - return &REST{store} -} diff --git a/pkg/registry/extensions/thirdpartyresource/storage/storage_test.go b/pkg/registry/extensions/thirdpartyresource/storage/storage_test.go deleted file mode 100644 index cbcf60a4d5e..00000000000 --- 
a/pkg/registry/extensions/thirdpartyresource/storage/storage_test.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import ( - "fmt" - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/apis/extensions" - // Ensure that extensions/v1beta1 package is initialized. - _ "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apiserver/pkg/registry/generic" - etcdtesting "k8s.io/apiserver/pkg/storage/etcd/testing" - "k8s.io/kubernetes/pkg/registry/registrytest" -) - -func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) { - etcdStorage, server := registrytest.NewEtcdStorage(t, extensions.GroupName) - restOptions := generic.RESTOptions{ - StorageConfig: etcdStorage, - Decorator: generic.UndecoratedStorage, - DeleteCollectionWorkers: 1, - ResourcePrefix: "thirdpartyresources", - } - return NewREST(restOptions), server -} - -func validNewThirdPartyResource(name string) *extensions.ThirdPartyResource { - return &extensions.ThirdPartyResource{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Versions: []extensions.APIVersion{ - { - Name: "v1", - }, - }, - } -} - -func namer(i int) string { - return fmt.Sprintf("kind%d.example.com", i) -} - -func TestCreate(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer 
storage.Store.DestroyFunc() - test := registrytest.New(t, storage.Store).ClusterScope().Namer(namer).GeneratesName() - rsrc := validNewThirdPartyResource("kind.domain.tld") - test.TestCreate( - // valid - rsrc, - // invalid - &extensions.ThirdPartyResource{}, - &extensions.ThirdPartyResource{ObjectMeta: metav1.ObjectMeta{Name: "kind"}, Versions: []extensions.APIVersion{{Name: "v1"}}}, - &extensions.ThirdPartyResource{ObjectMeta: metav1.ObjectMeta{Name: "kind.tld"}, Versions: []extensions.APIVersion{{Name: "v1"}}}, - &extensions.ThirdPartyResource{ObjectMeta: metav1.ObjectMeta{Name: "kind.domain.tld"}, Versions: []extensions.APIVersion{{Name: "v.1"}}}, - &extensions.ThirdPartyResource{ObjectMeta: metav1.ObjectMeta{Name: "kind.domain.tld"}, Versions: []extensions.APIVersion{{Name: "stable/v1"}}}, - ) -} - -func TestUpdate(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := registrytest.New(t, storage.Store).ClusterScope().Namer(namer) - test.TestUpdate( - // valid - validNewThirdPartyResource("kind.domain.tld"), - // updateFunc - func(obj runtime.Object) runtime.Object { - object := obj.(*extensions.ThirdPartyResource) - object.Description = "new description" - return object - }, - ) -} - -func TestDelete(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := registrytest.New(t, storage.Store).ClusterScope().Namer(namer) - test.TestDelete(validNewThirdPartyResource("kind.domain.tld")) -} - -func TestGet(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := registrytest.New(t, storage.Store).ClusterScope().Namer(namer) - test.TestGet(validNewThirdPartyResource("kind.domain.tld")) -} - -func TestList(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := registrytest.New(t, 
storage.Store).ClusterScope().Namer(namer) - test.TestList(validNewThirdPartyResource("kind.domain.tld")) -} - -func TestWatch(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := registrytest.New(t, storage.Store).ClusterScope().Namer(namer) - test.TestWatch( - validNewThirdPartyResource("kind.domain.tld"), - // matching labels - []labels.Set{}, - // not matching labels - []labels.Set{ - {"foo": "bar"}, - }, - // matching fields - []fields.Set{}, - // not matching fields - []fields.Set{ - {"metadata.name": "bar"}, - {"name": "foo"}, - }, - ) -} diff --git a/pkg/registry/extensions/thirdpartyresource/strategy.go b/pkg/registry/extensions/thirdpartyresource/strategy.go deleted file mode 100644 index baf13844c87..00000000000 --- a/pkg/registry/extensions/thirdpartyresource/strategy.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package thirdpartyresource - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation/field" - genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/apiserver/pkg/storage" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/extensions/validation" -) - -// strategy implements behavior for ThirdPartyResource objects -type strategy struct { - runtime.ObjectTyper -} - -// Strategy is the default logic that applies when creating and updating ThirdPartyResource -// objects via the REST API. -var Strategy = strategy{api.Scheme} - -var _ = rest.RESTCreateStrategy(Strategy) - -var _ = rest.RESTUpdateStrategy(Strategy) - -func (strategy) NamespaceScoped() bool { - return false -} - -func (strategy) GenerateName(base string) string { - return "" -} - -func (strategy) PrepareForCreate(ctx genericapirequest.Context, obj runtime.Object) { -} - -func (strategy) Validate(ctx genericapirequest.Context, obj runtime.Object) field.ErrorList { - return validation.ValidateThirdPartyResource(obj.(*extensions.ThirdPartyResource)) -} - -// Canonicalize normalizes the object after validation. -func (strategy) Canonicalize(obj runtime.Object) { -} - -func (strategy) AllowCreateOnUpdate() bool { - return false -} - -func (strategy) PrepareForUpdate(ctx genericapirequest.Context, obj, old runtime.Object) { -} - -func (strategy) ValidateUpdate(ctx genericapirequest.Context, obj, old runtime.Object) field.ErrorList { - return validation.ValidateThirdPartyResourceUpdate(obj.(*extensions.ThirdPartyResource), old.(*extensions.ThirdPartyResource)) -} - -func (strategy) AllowUnconditionalUpdate() bool { - return true -} - -// GetAttrs returns labels and fields of a given object for filtering purposes. 
-func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, bool, error) { - tpr, ok := obj.(*extensions.ThirdPartyResource) - if !ok { - return nil, nil, false, fmt.Errorf("not a ThirdPartyResource") - } - return labels.Set(tpr.Labels), SelectableFields(tpr), tpr.Initializers != nil, nil -} - -// Matcher returns a generic matcher for a given label and field selector. -func Matcher(label labels.Selector, field fields.Selector) storage.SelectionPredicate { - return storage.SelectionPredicate{ - Label: label, - Field: field, - GetAttrs: GetAttrs, - } -} - -// SelectableFields returns a field set that can be used for filter selection -func SelectableFields(obj *extensions.ThirdPartyResource) fields.Set { - return nil -} diff --git a/pkg/registry/extensions/thirdpartyresource/strategy_test.go b/pkg/registry/extensions/thirdpartyresource/strategy_test.go deleted file mode 100644 index 927d2713b5f..00000000000 --- a/pkg/registry/extensions/thirdpartyresource/strategy_test.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package thirdpartyresource - -import ( - "testing" - - _ "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/testapi" - apitesting "k8s.io/kubernetes/pkg/api/testing" - "k8s.io/kubernetes/pkg/apis/extensions" -) - -func TestSelectableFieldLabelConversions(t *testing.T) { - apitesting.TestSelectableFieldLabelConversionsOfKind(t, - testapi.Extensions.GroupVersion().String(), - "ThirdPartyResource", - SelectableFields(&extensions.ThirdPartyResource{}), - nil, - ) -} diff --git a/pkg/registry/extensions/thirdpartyresourcedata/BUILD b/pkg/registry/extensions/thirdpartyresourcedata/BUILD deleted file mode 100644 index 6e5ad937bc0..00000000000 --- a/pkg/registry/extensions/thirdpartyresourcedata/BUILD +++ /dev/null @@ -1,80 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "codec.go", - "doc.go", - "registry.go", - "strategy.go", - "util.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/util:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/apis/extensions/validation:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "codec_test.go", - "strategy_test.go", - "util_test.go", - ], - library = ":go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/api/testing:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/registry/extensions/thirdpartyresourcedata/storage:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/pkg/registry/extensions/thirdpartyresourcedata/codec.go b/pkg/registry/extensions/thirdpartyresourcedata/codec.go deleted file mode 100644 index 573d7b5bf87..00000000000 --- a/pkg/registry/extensions/thirdpartyresourcedata/codec.go +++ /dev/null @@ -1,593 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package thirdpartyresourcedata - -import ( - "bytes" - gojson "encoding/json" - "fmt" - "io" - "net/url" - "strings" - - "k8s.io/api/core/v1" - "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/json" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/kubernetes/pkg/api" - apiutil "k8s.io/kubernetes/pkg/api/util" - "k8s.io/kubernetes/pkg/apis/extensions" -) - -type thirdPartyObjectConverter struct { - converter runtime.ObjectConvertor -} - -func (t *thirdPartyObjectConverter) ConvertToVersion(in runtime.Object, outVersion runtime.GroupVersioner) (out runtime.Object, err error) { - switch in.(type) { - // This seems weird, but in this case the ThirdPartyResourceData is really just a wrapper on the raw 3rd party data. - // The actual thing printed/sent to server is the actual raw third party resource data, which only has one version. 
- case *extensions.ThirdPartyResourceData: - return in, nil - default: - return t.converter.ConvertToVersion(in, outVersion) - } -} - -func (t *thirdPartyObjectConverter) Convert(in, out, context interface{}) error { - return t.converter.Convert(in, out, context) -} - -func (t *thirdPartyObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { - return t.converter.ConvertFieldLabel(version, kind, label, value) -} - -func NewThirdPartyObjectConverter(converter runtime.ObjectConvertor) runtime.ObjectConvertor { - return &thirdPartyObjectConverter{converter} -} - -type thirdPartyResourceDataMapper struct { - mapper meta.RESTMapper - kind string - version string - group string -} - -var _ meta.RESTMapper = &thirdPartyResourceDataMapper{} - -func (t *thirdPartyResourceDataMapper) getResource() schema.GroupVersionResource { - plural, _ := meta.UnsafeGuessKindToResource(t.getKind()) - - return plural -} - -func (t *thirdPartyResourceDataMapper) getKind() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: t.group, Version: t.version, Kind: t.kind} -} - -func (t *thirdPartyResourceDataMapper) isThirdPartyResource(partialResource schema.GroupVersionResource) bool { - actualResource := t.getResource() - if strings.ToLower(partialResource.Resource) != strings.ToLower(actualResource.Resource) { - return false - } - if len(partialResource.Group) != 0 && partialResource.Group != actualResource.Group { - return false - } - if len(partialResource.Version) != 0 && partialResource.Version != actualResource.Version { - return false - } - - return true -} - -func (t *thirdPartyResourceDataMapper) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - if t.isThirdPartyResource(resource) { - return []schema.GroupVersionResource{t.getResource()}, nil - } - return t.mapper.ResourcesFor(resource) -} - -func (t *thirdPartyResourceDataMapper) KindsFor(resource schema.GroupVersionResource) 
([]schema.GroupVersionKind, error) { - if t.isThirdPartyResource(resource) { - return []schema.GroupVersionKind{t.getKind()}, nil - } - return t.mapper.KindsFor(resource) -} - -func (t *thirdPartyResourceDataMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { - if t.isThirdPartyResource(resource) { - return t.getResource(), nil - } - return t.mapper.ResourceFor(resource) -} - -func (t *thirdPartyResourceDataMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - if t.isThirdPartyResource(resource) { - return t.getKind(), nil - } - return t.mapper.KindFor(resource) -} - -func (t *thirdPartyResourceDataMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - if len(versions) != 1 { - return nil, fmt.Errorf("unexpected set of versions: %v", versions) - } - if gk.Group != t.group { - return nil, fmt.Errorf("unknown group %q expected %s", gk.Group, t.group) - } - if gk.Kind != "ThirdPartyResourceData" { - return nil, fmt.Errorf("unknown kind %s expected %s", gk.Kind, t.kind) - } - if versions[0] != t.version { - return nil, fmt.Errorf("unknown version %q expected %q", versions[0], t.version) - } - - // TODO figure out why we're doing this rewriting - extensionGK := schema.GroupKind{Group: extensions.GroupName, Kind: "ThirdPartyResourceData"} - - mapping, err := t.mapper.RESTMapping(extensionGK, api.Registry.GroupOrDie(extensions.GroupName).GroupVersion.Version) - if err != nil { - return nil, err - } - mapping.ObjectConvertor = &thirdPartyObjectConverter{mapping.ObjectConvertor} - return mapping, nil -} - -func (t *thirdPartyResourceDataMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { - if gk.Group != t.group { - return nil, fmt.Errorf("unknown group %q expected %s", gk.Group, t.group) - } - if gk.Kind != "ThirdPartyResourceData" { - return nil, fmt.Errorf("unknown kind %s expected %s", gk.Kind, t.kind) - } - - // 
TODO figure out why we're doing this rewriting - extensionGK := schema.GroupKind{Group: extensions.GroupName, Kind: "ThirdPartyResourceData"} - - mappings, err := t.mapper.RESTMappings(extensionGK, versions...) - if err != nil { - return nil, err - } - for _, m := range mappings { - m.ObjectConvertor = &thirdPartyObjectConverter{m.ObjectConvertor} - } - return mappings, nil -} - -func (t *thirdPartyResourceDataMapper) ResourceSingularizer(resource string) (singular string, err error) { - return t.mapper.ResourceSingularizer(resource) -} - -func NewMapper(mapper meta.RESTMapper, kind, version, group string) meta.RESTMapper { - return &thirdPartyResourceDataMapper{ - mapper: mapper, - kind: kind, - version: version, - group: group, - } -} - -type thirdPartyResourceDataCodecFactory struct { - delegate runtime.NegotiatedSerializer - kind string - encodeGV schema.GroupVersion - decodeGV schema.GroupVersion -} - -func NewNegotiatedSerializer(s runtime.NegotiatedSerializer, kind string, encodeGV, decodeGV schema.GroupVersion) runtime.NegotiatedSerializer { - return &thirdPartyResourceDataCodecFactory{ - delegate: s, - kind: kind, - encodeGV: encodeGV, - decodeGV: decodeGV, - } -} - -func (t *thirdPartyResourceDataCodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { - for _, info := range t.delegate.SupportedMediaTypes() { - if info.MediaType == runtime.ContentTypeJSON { - return []runtime.SerializerInfo{info} - } - } - return nil -} - -func (t *thirdPartyResourceDataCodecFactory) EncoderForVersion(s runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { - return &thirdPartyResourceDataEncoder{delegate: t.delegate.EncoderForVersion(s, gv), gvk: t.encodeGV.WithKind(t.kind)} -} - -func (t *thirdPartyResourceDataCodecFactory) DecoderToVersion(s runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { - return NewDecoder(t.delegate.DecoderToVersion(s, gv), t.kind) -} - -func NewCodec(delegate runtime.Codec, gvk schema.GroupVersionKind) runtime.Codec { - 
return runtime.NewCodec(NewEncoder(delegate, gvk), NewDecoder(delegate, gvk.Kind)) -} - -type thirdPartyResourceDataDecoder struct { - delegate runtime.Decoder - kind string -} - -func NewDecoder(delegate runtime.Decoder, kind string) runtime.Decoder { - return &thirdPartyResourceDataDecoder{delegate: delegate, kind: kind} -} - -var _ runtime.Decoder = &thirdPartyResourceDataDecoder{} - -func parseObject(data []byte) (map[string]interface{}, error) { - var mapObj map[string]interface{} - if err := json.Unmarshal(data, &mapObj); err != nil { - return nil, err - } - - return mapObj, nil -} - -func (t *thirdPartyResourceDataDecoder) populate(data []byte) (runtime.Object, *schema.GroupVersionKind, error) { - mapObj, err := parseObject(data) - if err != nil { - return nil, nil, err - } - return t.populateFromObject(mapObj, data) -} - -func (t *thirdPartyResourceDataDecoder) populateFromObject(mapObj map[string]interface{}, data []byte) (runtime.Object, *schema.GroupVersionKind, error) { - typeMeta := metav1.TypeMeta{} - if err := json.Unmarshal(data, &typeMeta); err != nil { - return nil, nil, err - } - - gv, err := schema.ParseGroupVersion(typeMeta.APIVersion) - if err != nil { - return nil, nil, err - } - gvk := gv.WithKind(typeMeta.Kind) - - isList := strings.HasSuffix(typeMeta.Kind, "List") - switch { - case !isList && (len(t.kind) == 0 || typeMeta.Kind == t.kind): - result := &extensions.ThirdPartyResourceData{} - if err := t.populateResource(result, mapObj, data); err != nil { - return nil, nil, err - } - return result, &gvk, nil - case isList && (len(t.kind) == 0 || typeMeta.Kind == t.kind+"List"): - list := &extensions.ThirdPartyResourceDataList{} - if err := t.populateListResource(list, mapObj); err != nil { - return nil, nil, err - } - return list, &gvk, nil - default: - return nil, nil, fmt.Errorf("unexpected kind: %s, expected %s", typeMeta.Kind, t.kind) - } -} - -func (t *thirdPartyResourceDataDecoder) populateResource(objIn 
*extensions.ThirdPartyResourceData, mapObj map[string]interface{}, data []byte) error { - metadata, ok := mapObj["metadata"].(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected object for metadata: %#v", mapObj["metadata"]) - } - - metadataData, err := json.Marshal(metadata) - if err != nil { - return err - } - - if err := json.Unmarshal(metadataData, &objIn.ObjectMeta); err != nil { - return err - } - - // Override API Version with the ThirdPartyResourceData value - // TODO: fix this hard code - objIn.APIVersion = v1beta1.SchemeGroupVersion.String() - - objIn.Data = data - return nil -} - -func IsThirdPartyObject(rawData []byte, gvk *schema.GroupVersionKind) (isThirdParty bool, gvkOut *schema.GroupVersionKind, err error) { - var gv schema.GroupVersion - if gvk == nil { - data, err := yaml.ToJSON(rawData) - if err != nil { - return false, nil, err - } - metadata := metav1.TypeMeta{} - if err = json.Unmarshal(data, &metadata); err != nil { - return false, nil, err - } - gv, err = schema.ParseGroupVersion(metadata.APIVersion) - if err != nil { - return false, nil, err - } - gvkOut = &schema.GroupVersionKind{ - Group: gv.Group, - Version: gv.Version, - Kind: metadata.Kind, - } - } else { - gv = gvk.GroupVersion() - gvkOut = gvk - } - return api.Registry.IsThirdPartyAPIGroupVersion(gv), gvkOut, nil -} - -func (t *thirdPartyResourceDataDecoder) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - if into == nil { - if gvk == nil || gvk.Kind != t.kind { - if isThirdParty, _, err := IsThirdPartyObject(data, gvk); err != nil { - return nil, nil, err - } else if !isThirdParty { - return t.delegate.Decode(data, gvk, into) - } - } - return t.populate(data) - } - switch o := into.(type) { - case *extensions.ThirdPartyResourceData: - break - case *runtime.VersionedObjects: - // We're not sure that it's third party, we need to test - if gvk == nil || gvk.Kind != t.kind { - if isThirdParty, _, 
err := IsThirdPartyObject(data, gvk); err != nil { - return nil, nil, err - } else if !isThirdParty { - return t.delegate.Decode(data, gvk, into) - } - } - obj, outGVK, err := t.populate(data) - if err != nil { - return nil, nil, err - } - o.Objects = []runtime.Object{ - obj, - } - return o, outGVK, nil - default: - if gvk != nil && api.Registry.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) { - // delegate won't recognize a thirdparty group version - gvk = nil - } - return t.delegate.Decode(data, gvk, into) - } - - thirdParty := into.(*extensions.ThirdPartyResourceData) - var mapObj map[string]interface{} - if err := json.Unmarshal(data, &mapObj); err != nil { - return nil, nil, err - } - - /*if gvk.Kind != "ThirdPartyResourceData" { - return nil, nil, fmt.Errorf("unexpected kind: %s", gvk.Kind) - }*/ - actual := &schema.GroupVersionKind{} - if kindObj, found := mapObj["kind"]; !found { - if gvk == nil { - return nil, nil, runtime.NewMissingKindErr(string(data)) - } - mapObj["kind"] = gvk.Kind - actual.Kind = gvk.Kind - } else { - kindStr, ok := kindObj.(string) - if !ok { - return nil, nil, fmt.Errorf("unexpected object for 'kind': %v", kindObj) - } - if len(t.kind) > 0 && kindStr != t.kind { - return nil, nil, fmt.Errorf("kind doesn't match, expecting: %s, got %s", t.kind, kindStr) - } - actual.Kind = kindStr - } - if versionObj, found := mapObj["apiVersion"]; !found { - if gvk == nil { - return nil, nil, runtime.NewMissingVersionErr(string(data)) - } - mapObj["apiVersion"] = gvk.GroupVersion().String() - actual.Group, actual.Version = gvk.Group, gvk.Version - } else { - versionStr, ok := versionObj.(string) - if !ok { - return nil, nil, fmt.Errorf("unexpected object for 'apiVersion': %v", versionObj) - } - if gvk != nil && versionStr != gvk.GroupVersion().String() { - return nil, nil, fmt.Errorf("version doesn't match, expecting: %v, got %s", gvk.GroupVersion(), versionStr) - } - gv, err := schema.ParseGroupVersion(versionStr) - if err != nil { - return nil, 
nil, err - } - actual.Group, actual.Version = gv.Group, gv.Version - } - - mapObj, err := parseObject(data) - if err != nil { - return nil, actual, err - } - if err := t.populateResource(thirdParty, mapObj, data); err != nil { - return nil, actual, err - } - return thirdParty, actual, nil -} - -func (t *thirdPartyResourceDataDecoder) populateListResource(objIn *extensions.ThirdPartyResourceDataList, mapObj map[string]interface{}) error { - items, ok := mapObj["items"].([]interface{}) - if !ok { - return fmt.Errorf("unexpected object for items: %#v", mapObj["items"]) - } - objIn.Items = make([]extensions.ThirdPartyResourceData, len(items)) - for ix := range items { - objData, err := json.Marshal(items[ix]) - if err != nil { - return err - } - objMap, err := parseObject(objData) - if err != nil { - return err - } - if err := t.populateResource(&objIn.Items[ix], objMap, objData); err != nil { - return err - } - } - return nil -} - -type thirdPartyResourceDataEncoder struct { - delegate runtime.Encoder - gvk schema.GroupVersionKind -} - -func NewEncoder(delegate runtime.Encoder, gvk schema.GroupVersionKind) runtime.Encoder { - return &thirdPartyResourceDataEncoder{delegate: delegate, gvk: gvk} -} - -var _ runtime.Encoder = &thirdPartyResourceDataEncoder{} - -func encodeToJSON(obj *extensions.ThirdPartyResourceData, stream io.Writer) error { - var objMap map[string]interface{} - if err := json.Unmarshal(obj.Data, &objMap); err != nil { - return err - } - - objMap["metadata"] = &obj.ObjectMeta - encoder := json.NewEncoder(stream) - return encoder.Encode(objMap) -} - -func (t *thirdPartyResourceDataEncoder) Encode(obj runtime.Object, stream io.Writer) (err error) { - switch obj := obj.(type) { - case *extensions.ThirdPartyResourceData: - return encodeToJSON(obj, stream) - case *extensions.ThirdPartyResourceDataList: - // TODO: There are likely still better ways to do this... 
- listItems := make([]gojson.RawMessage, len(obj.Items)) - - for ix := range obj.Items { - buff := &bytes.Buffer{} - err := encodeToJSON(&obj.Items[ix], buff) - if err != nil { - return err - } - listItems[ix] = gojson.RawMessage(buff.Bytes()) - } - - if t.gvk.Empty() { - return fmt.Errorf("thirdPartyResourceDataEncoder was not given a target version") - } - - encMap := struct { - // +optional - Kind string `json:"kind,omitempty"` - Items []gojson.RawMessage `json:"items"` - // +optional - Metadata metav1.ListMeta `json:"metadata,omitempty"` - // +optional - APIVersion string `json:"apiVersion,omitempty"` - }{ - Kind: t.gvk.Kind + "List", - Items: listItems, - Metadata: obj.ListMeta, - APIVersion: t.gvk.GroupVersion().String(), - } - - encBytes, err := json.Marshal(encMap) - if err != nil { - return err - } - - _, err = stream.Write(encBytes) - return err - case *metav1.InternalEvent: - event := &metav1.WatchEvent{} - err := metav1.Convert_versioned_InternalEvent_to_versioned_Event(obj, event, nil) - if err != nil { - return err - } - - enc := json.NewEncoder(stream) - err = enc.Encode(event) - if err != nil { - return err - } - - return nil - case *metav1.WatchEvent: - // This is the same as the InternalEvent case above, except the caller - // already did the conversion for us (see #44350). - // In theory, we probably don't need the InternalEvent case anymore, - // but the test coverage for TPR is too low to risk removing it. 
- return json.NewEncoder(stream).Encode(obj) - case *metav1.Status, *metav1.APIResourceList: - return t.delegate.Encode(obj, stream) - default: - return fmt.Errorf("unexpected object to encode: %#v", obj) - } -} - -func NewObjectCreator(group, version string, delegate runtime.ObjectCreater) runtime.ObjectCreater { - return &thirdPartyResourceDataCreator{group, version, delegate} -} - -type thirdPartyResourceDataCreator struct { - group string - version string - delegate runtime.ObjectCreater -} - -func (t *thirdPartyResourceDataCreator) New(kind schema.GroupVersionKind) (out runtime.Object, err error) { - switch kind.Kind { - case "ThirdPartyResourceData": - if apiutil.GetGroupVersion(t.group, t.version) != kind.GroupVersion().String() { - return nil, fmt.Errorf("unknown kind %v", kind) - } - return &extensions.ThirdPartyResourceData{}, nil - case "ThirdPartyResourceDataList": - if apiutil.GetGroupVersion(t.group, t.version) != kind.GroupVersion().String() { - return nil, fmt.Errorf("unknown kind %v", kind) - } - return &extensions.ThirdPartyResourceDataList{}, nil - // TODO: this list needs to be formalized higher in the chain - case "ListOptions", "WatchEvent": - if apiutil.GetGroupVersion(t.group, t.version) == kind.GroupVersion().String() { - // Translate third party group to external group. 
- gvk := api.Registry.EnabledVersionsForGroup(api.GroupName)[0].WithKind(kind.Kind) - return t.delegate.New(gvk) - } - return t.delegate.New(kind) - default: - return t.delegate.New(kind) - } -} - -func NewThirdPartyParameterCodec(p runtime.ParameterCodec) runtime.ParameterCodec { - return &thirdPartyParameterCodec{p} -} - -type thirdPartyParameterCodec struct { - delegate runtime.ParameterCodec -} - -func (t *thirdPartyParameterCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into runtime.Object) error { - return t.delegate.DecodeParameters(parameters, v1.SchemeGroupVersion, into) -} - -func (t *thirdPartyParameterCodec) EncodeParameters(obj runtime.Object, to schema.GroupVersion) (url.Values, error) { - return t.delegate.EncodeParameters(obj, v1.SchemeGroupVersion) -} diff --git a/pkg/registry/extensions/thirdpartyresourcedata/codec_test.go b/pkg/registry/extensions/thirdpartyresourcedata/codec_test.go deleted file mode 100644 index 685a5e06920..00000000000 --- a/pkg/registry/extensions/thirdpartyresourcedata/codec_test.go +++ /dev/null @@ -1,329 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package thirdpartyresourcedata - -import ( - "bytes" - "encoding/json" - "reflect" - "strings" - "testing" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/pkg/apis/extensions" -) - -type Foo struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty" description:"standard object metadata"` - - SomeField string `json:"someField"` - OtherField int `json:"otherField"` -} - -func (*Foo) GetObjectKind() schema.ObjectKind { - return schema.EmptyObjectKind -} - -type FooList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty" description:"standard list metadata; see https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"` - - Items []Foo `json:"items"` -} - -func TestCodec(t *testing.T) { - tests := []struct { - into runtime.Object - obj *Foo - expectErr bool - name string - }{ - { - into: &runtime.VersionedObjects{}, - obj: &Foo{ - ObjectMeta: metav1.ObjectMeta{Name: "bar"}, - TypeMeta: metav1.TypeMeta{APIVersion: "company.com/v1", Kind: "Foo"}, - }, - expectErr: false, - name: "versioned objects list", - }, - { - obj: &Foo{ObjectMeta: metav1.ObjectMeta{Name: "bar"}}, - expectErr: true, - name: "missing kind", - }, - { - obj: &Foo{ - ObjectMeta: metav1.ObjectMeta{Name: "bar"}, - TypeMeta: metav1.TypeMeta{APIVersion: "company.com/v1", Kind: "Foo"}, - }, - name: "basic", - }, - { - into: &extensions.ThirdPartyResourceData{}, - obj: &Foo{ - ObjectMeta: metav1.ObjectMeta{Name: "bar"}, - TypeMeta: metav1.TypeMeta{Kind: "ThirdPartyResourceData"}, - }, - expectErr: true, - name: "broken kind", - }, - { - obj: &Foo{ - ObjectMeta: metav1.ObjectMeta{Name: "bar", ResourceVersion: "baz"}, - TypeMeta: metav1.TypeMeta{APIVersion: "company.com/v1", Kind: "Foo"}, - }, - name: "resource version", - }, - { - obj: &Foo{ 
- ObjectMeta: metav1.ObjectMeta{ - Name: "bar", - CreationTimestamp: metav1.Time{Time: time.Unix(100, 0)}, - }, - TypeMeta: metav1.TypeMeta{ - APIVersion: "company.com/v1", - Kind: "Foo", - }, - }, - name: "creation time", - }, - { - obj: &Foo{ - ObjectMeta: metav1.ObjectMeta{ - Name: "bar", - ResourceVersion: "baz", - Labels: map[string]string{"foo": "bar", "baz": "blah"}, - }, - TypeMeta: metav1.TypeMeta{APIVersion: "company.com/v1", Kind: "Foo"}, - }, - name: "labels", - }, - } - api.Registry.AddThirdPartyAPIGroupVersions(schema.GroupVersion{Group: "company.com", Version: "v1"}) - for _, test := range tests { - d := &thirdPartyResourceDataDecoder{kind: "Foo", delegate: testapi.Extensions.Codec()} - e := &thirdPartyResourceDataEncoder{gvk: schema.GroupVersionKind{ - Group: "company.com", - Version: "v1", - Kind: "Foo", - }, delegate: testapi.Extensions.Codec()} - data, err := json.Marshal(test.obj) - if err != nil { - t.Errorf("[%s] unexpected error: %v", test.name, err) - continue - } - var obj runtime.Object - if test.into != nil { - err = runtime.DecodeInto(d, data, test.into) - obj = test.into - } else { - obj, err = runtime.Decode(d, data) - } - if err != nil && !test.expectErr { - t.Errorf("[%s] unexpected error: %v", test.name, err) - continue - } - if test.expectErr { - if err == nil { - t.Errorf("[%s] unexpected non-error", test.name) - } - continue - } - var rsrcObj *extensions.ThirdPartyResourceData - switch o := obj.(type) { - case *extensions.ThirdPartyResourceData: - rsrcObj = o - case *runtime.VersionedObjects: - rsrcObj = o.First().(*extensions.ThirdPartyResourceData) - default: - t.Errorf("[%s] unexpected object: %v", test.name, obj) - continue - } - if !reflect.DeepEqual(rsrcObj.ObjectMeta, test.obj.ObjectMeta) { - t.Errorf("[%s]\nexpected\n%v\nsaw\n%v\n", test.name, rsrcObj.ObjectMeta, test.obj.ObjectMeta) - } - var output Foo - if err := json.Unmarshal(rsrcObj.Data, &output); err != nil { - t.Errorf("[%s] unexpected error: %v", test.name, err) 
- continue - } - if !reflect.DeepEqual(&output, test.obj) { - t.Errorf("[%s]\nexpected\n%v\nsaw\n%v\n", test.name, test.obj, &output) - } - - data, err = runtime.Encode(e, rsrcObj) - if err != nil { - t.Errorf("[%s] unexpected error: %v", test.name, err) - } - - var output2 Foo - if err := json.Unmarshal(data, &output2); err != nil { - t.Errorf("[%s] unexpected error: %v", test.name, err) - continue - } - if !reflect.DeepEqual(&output2, test.obj) { - t.Errorf("[%s]\nexpected\n%v\nsaw\n%v\n", test.name, test.obj, &output2) - } - } -} - -func TestCreater(t *testing.T) { - creater := NewObjectCreator("creater group", "creater version", api.Scheme) - tests := []struct { - name string - kind schema.GroupVersionKind - expectedObj runtime.Object - expectErr bool - }{ - { - name: "valid ThirdPartyResourceData creation", - kind: schema.GroupVersionKind{Group: "creater group", Version: "creater version", Kind: "ThirdPartyResourceData"}, - expectedObj: &extensions.ThirdPartyResourceData{}, - expectErr: false, - }, - { - name: "invalid ThirdPartyResourceData creation", - kind: schema.GroupVersionKind{Version: "invalid version", Kind: "ThirdPartyResourceData"}, - expectedObj: nil, - expectErr: true, - }, - { - name: "valid ListOptions creation", - kind: schema.GroupVersionKind{Version: "v1", Kind: "ListOptions"}, - expectedObj: &metav1.ListOptions{}, - expectErr: false, - }, - } - for _, test := range tests { - out, err := creater.New(test.kind) - if err != nil && !test.expectErr { - t.Errorf("[%s] unexpected error: %v", test.name, err) - } - if err == nil && test.expectErr { - t.Errorf("[%s] unexpected non-error", test.name) - } - if !reflect.DeepEqual(test.expectedObj, out) { - t.Errorf("[%s] unexpected error: expect: %v, got: %v", test.name, test.expectedObj, out) - } - - } -} - -func TestEncodeToStreamForInternalEvent(t *testing.T) { - e := &thirdPartyResourceDataEncoder{gvk: schema.GroupVersionKind{ - Group: "company.com", - Version: "v1", - Kind: "Foo", - }, delegate: 
testapi.Extensions.Codec()} - buf := bytes.NewBuffer([]byte{}) - expected := &metav1.WatchEvent{ - Type: "Added", - } - err := e.Encode(&metav1.InternalEvent{ - Type: "Added", - }, buf) - - jBytes, _ := json.Marshal(expected) - - if string(jBytes) == buf.String() { - t.Errorf("unexpected encoding expected %s got %s", string(jBytes), buf.String()) - } - - if err != nil { - t.Errorf("unexpected error encoding: %v", err) - } -} - -func TestThirdPartyResourceDataListEncoding(t *testing.T) { - gv := schema.GroupVersion{Group: "stable.foo.faz", Version: "v1"} - gvk := gv.WithKind("Bar") - e := &thirdPartyResourceDataEncoder{delegate: testapi.Extensions.Codec(), gvk: gvk} - subject := &extensions.ThirdPartyResourceDataList{} - - buf := bytes.NewBuffer([]byte{}) - err := e.Encode(subject, buf) - if err != nil { - t.Errorf("encoding unexpected error: %v", err) - } - - targetOutput := struct { - Kind string `json:"kind,omitempty"` - Items []json.RawMessage `json:"items"` - Metadata metav1.ListMeta `json:"metadata,omitempty"` - APIVersion string `json:"apiVersion,omitempty"` - }{} - err = json.Unmarshal(buf.Bytes(), &targetOutput) - - if err != nil { - t.Errorf("unmarshal unexpected error: %v", err) - } - - if expectedKind := gvk.Kind + "List"; expectedKind != targetOutput.Kind { - t.Errorf("unexpected kind on list got %s expected %s", targetOutput.Kind, expectedKind) - } - - if targetOutput.Metadata != subject.ListMeta { - t.Errorf("metadata mismatch %v != %v", targetOutput.Metadata, subject.ListMeta) - } - - if targetOutput.APIVersion != gv.String() { - t.Errorf("apiversion mismatch %v != %v", targetOutput.APIVersion, gv.String()) - } -} - -func TestDecodeNumbers(t *testing.T) { - gv := schema.GroupVersion{Group: "stable.foo.faz", Version: "v1"} - gvk := gv.WithKind("Foo") - e := &thirdPartyResourceDataEncoder{delegate: testapi.Extensions.Codec(), gvk: gvk} - d := &thirdPartyResourceDataDecoder{kind: "Foo", delegate: testapi.Extensions.Codec()} - - // Use highest int64 
number and 1000000. - subject := &extensions.ThirdPartyResourceDataList{ - Items: []extensions.ThirdPartyResourceData{ - { - Data: []byte(`{"num1": 9223372036854775807, "num2": 1000000}`), - }, - }, - } - - // Encode to get original JSON. - originalJSON := bytes.NewBuffer([]byte{}) - err := e.Encode(subject, originalJSON) - if err != nil { - t.Errorf("encoding unexpected error: %v", err) - } - - // Decode original JSON. - var into runtime.Object - into, _, err = d.Decode(originalJSON.Bytes(), &gvk, into) - if err != nil { - t.Errorf("decoding unexpected error: %v", err) - } - - // Check if int is preserved. - decodedJSON := into.(*extensions.ThirdPartyResourceDataList).Items[0].Data - if !strings.Contains(string(decodedJSON), `"num1":9223372036854775807,"num2":1000000`) { - t.Errorf("Expected %s, got %s", `"num1":9223372036854775807,"num2":1000000`, string(decodedJSON)) - } -} diff --git a/pkg/registry/extensions/thirdpartyresourcedata/doc.go b/pkg/registry/extensions/thirdpartyresourcedata/doc.go deleted file mode 100644 index c5555ce4fed..00000000000 --- a/pkg/registry/extensions/thirdpartyresourcedata/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package thirdpartyresourcedata provides Registry interface and its REST -// implementation for storing ThirdPartyResourceData api objects. 
-package thirdpartyresourcedata // import "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata" diff --git a/pkg/registry/extensions/thirdpartyresourcedata/registry.go b/pkg/registry/extensions/thirdpartyresourcedata/registry.go deleted file mode 100644 index d8266046add..00000000000 --- a/pkg/registry/extensions/thirdpartyresourcedata/registry.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package thirdpartyresourcedata - -import ( - metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" - genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" -) - -// Registry is an interface implemented by things that know how to store ThirdPartyResourceData objects. 
-type Registry interface { - ListThirdPartyResourceData(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*extensions.ThirdPartyResourceDataList, error) - WatchThirdPartyResourceData(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) - GetThirdPartyResourceData(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (*extensions.ThirdPartyResourceData, error) - CreateThirdPartyResourceData(ctx genericapirequest.Context, resource *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) - UpdateThirdPartyResourceData(ctx genericapirequest.Context, resource *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) - DeleteThirdPartyResourceData(ctx genericapirequest.Context, name string) error -} - -// storage puts strong typing around storage calls -type storage struct { - rest.StandardStorage -} - -// NewRegistry returns a new Registry interface for the given Storage. Any mismatched -// types will panic. 
-func NewRegistry(s rest.StandardStorage) Registry { - return &storage{s} -} - -func (s *storage) ListThirdPartyResourceData(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*extensions.ThirdPartyResourceDataList, error) { - obj, err := s.List(ctx, options) - if err != nil { - return nil, err - } - return obj.(*extensions.ThirdPartyResourceDataList), nil -} - -func (s *storage) WatchThirdPartyResourceData(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) { - return s.Watch(ctx, options) -} - -func (s *storage) GetThirdPartyResourceData(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (*extensions.ThirdPartyResourceData, error) { - obj, err := s.Get(ctx, name, options) - if err != nil { - return nil, err - } - return obj.(*extensions.ThirdPartyResourceData), nil -} - -func (s *storage) CreateThirdPartyResourceData(ctx genericapirequest.Context, ThirdPartyResourceData *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) { - obj, err := s.Create(ctx, ThirdPartyResourceData, false) - return obj.(*extensions.ThirdPartyResourceData), err -} - -func (s *storage) UpdateThirdPartyResourceData(ctx genericapirequest.Context, ThirdPartyResourceData *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) { - obj, _, err := s.Update(ctx, ThirdPartyResourceData.Name, rest.DefaultUpdatedObjectInfo(ThirdPartyResourceData, api.Scheme)) - return obj.(*extensions.ThirdPartyResourceData), err -} - -func (s *storage) DeleteThirdPartyResourceData(ctx genericapirequest.Context, name string) error { - _, _, err := s.Delete(ctx, name, nil) - return err -} diff --git a/pkg/registry/extensions/thirdpartyresourcedata/storage/BUILD b/pkg/registry/extensions/thirdpartyresourcedata/storage/BUILD deleted file mode 100644 index c8a16f61dd7..00000000000 --- a/pkg/registry/extensions/thirdpartyresourcedata/storage/BUILD +++ /dev/null @@ -1,60 +0,0 @@ 
-package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = ["storage_test.go"], - library = ":go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/apis/extensions:go_default_library", - "//pkg/registry/registrytest:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = ["storage.go"], - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/registry/cachesize:go_default_library", - "//pkg/registry/extensions/thirdpartyresourcedata:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff 
--git a/pkg/registry/extensions/thirdpartyresourcedata/storage/storage.go b/pkg/registry/extensions/thirdpartyresourcedata/storage/storage.go deleted file mode 100644 index 3693dff82b8..00000000000 --- a/pkg/registry/extensions/thirdpartyresourcedata/storage/storage.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import ( - "strings" - "sync/atomic" - - "k8s.io/apimachinery/pkg/api/errors" - metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - "k8s.io/apiserver/pkg/registry/generic" - genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" - "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/registry/cachesize" - "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata" -) - -// errFrozen is a transient error to indicate that clients should retry with backoff. -var errFrozen = errors.NewServiceUnavailable("TPR data is temporarily frozen") - -// REST implements a RESTStorage for ThirdPartyResourceData. -type REST struct { - *genericregistry.Store - kind string - frozen atomic.Value -} - -// Freeze causes all future calls to Create/Update/Delete/DeleteCollection to return a transient error. 
-// This is irreversible and meant for use when the TPR data is being deleted or migrated/abandoned. -func (r *REST) Freeze() { - r.frozen.Store(true) -} - -func (r *REST) isFrozen() bool { - return r.frozen.Load() != nil -} - -// Create is a wrapper to support Freeze. -func (r *REST) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) { - if r.isFrozen() { - return nil, errFrozen - } - return r.Store.Create(ctx, obj, includeUninitialized) -} - -// Update is a wrapper to support Freeze. -func (r *REST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - if r.isFrozen() { - return nil, false, errFrozen - } - return r.Store.Update(ctx, name, objInfo) -} - -// Delete is a wrapper to support Freeze. -func (r *REST) Delete(ctx genericapirequest.Context, name string, options *metav1.DeleteOptions) (runtime.Object, bool, error) { - if r.isFrozen() { - return nil, false, errFrozen - } - return r.Store.Delete(ctx, name, options) -} - -// DeleteCollection is a wrapper to support Freeze. -func (r *REST) DeleteCollection(ctx genericapirequest.Context, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) { - if r.isFrozen() { - return nil, errFrozen - } - return r.Store.DeleteCollection(ctx, options, listOptions) -} - -// NewREST returns a registry which will store ThirdPartyResourceData in the given helper -func NewREST(optsGetter generic.RESTOptionsGetter, group, kind string) *REST { - resource := extensions.Resource("thirdpartyresourcedatas") - opts, err := optsGetter.GetRESTOptions(resource) - if err != nil { - panic(err) // TODO: Propagate error up - } - - // We explicitly do NOT do any decoration here yet. 
- opts.Decorator = generic.UndecoratedStorage // TODO use watchCacheSize=-1 to signal UndecoratedStorage - opts.ResourcePrefix = "/ThirdPartyResourceData/" + group + "/" + strings.ToLower(kind) + "s" - - store := &genericregistry.Store{ - Copier: api.Scheme, - NewFunc: func() runtime.Object { return &extensions.ThirdPartyResourceData{} }, - NewListFunc: func() runtime.Object { return &extensions.ThirdPartyResourceDataList{} }, - PredicateFunc: thirdpartyresourcedata.Matcher, - QualifiedResource: resource, - WatchCacheSize: cachesize.GetWatchCacheSizeByResource(resource.Resource), - - CreateStrategy: thirdpartyresourcedata.Strategy, - UpdateStrategy: thirdpartyresourcedata.Strategy, - DeleteStrategy: thirdpartyresourcedata.Strategy, - } - options := &generic.StoreOptions{RESTOptions: opts, AttrFunc: thirdpartyresourcedata.GetAttrs} // Pass in opts to use UndecoratedStorage and custom ResourcePrefix - if err := store.CompleteWithOptions(options); err != nil { - panic(err) // TODO: Propagate error up - } - - return &REST{ - Store: store, - kind: kind, - } -} - -// Implements the rest.KindProvider interface -func (r *REST) Kind() string { - return r.kind -} diff --git a/pkg/registry/extensions/thirdpartyresourcedata/storage/storage_test.go b/pkg/registry/extensions/thirdpartyresourcedata/storage/storage_test.go deleted file mode 100644 index 6b0b3405e6d..00000000000 --- a/pkg/registry/extensions/thirdpartyresourcedata/storage/storage_test.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import ( - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/apis/extensions" - // Ensure that extensions/v1beta1 package is initialized. - _ "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apiserver/pkg/registry/generic" - etcdtesting "k8s.io/apiserver/pkg/storage/etcd/testing" - "k8s.io/kubernetes/pkg/registry/registrytest" -) - -func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) { - etcdStorage, server := registrytest.NewEtcdStorage(t, extensions.GroupName) - restOptions := generic.RESTOptions{StorageConfig: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1} - return NewREST(restOptions, "foo", "bar"), server -} - -func validNewThirdPartyResourceData(name string) *extensions.ThirdPartyResourceData { - return &extensions.ThirdPartyResourceData{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: metav1.NamespaceDefault, - }, - Data: []byte("foobarbaz"), - } -} - -func TestCreate(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := registrytest.New(t, storage.Store) - rsrc := validNewThirdPartyResourceData("foo") - rsrc.ObjectMeta = metav1.ObjectMeta{} - test.TestCreate( - // valid - rsrc, - // invalid - &extensions.ThirdPartyResourceData{}, - ) -} - -func TestUpdate(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := registrytest.New(t, storage.Store) - test.TestUpdate( - // valid - validNewThirdPartyResourceData("foo"), - // updateFunc - func(obj runtime.Object) runtime.Object { - object := obj.(*extensions.ThirdPartyResourceData) - object.Data = []byte("new description") - return object - }, - ) -} - -func 
TestDelete(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := registrytest.New(t, storage.Store) - test.TestDelete(validNewThirdPartyResourceData("foo")) -} - -func TestGet(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := registrytest.New(t, storage.Store) - test.TestGet(validNewThirdPartyResourceData("foo")) -} - -func TestList(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := registrytest.New(t, storage.Store) - test.TestList(validNewThirdPartyResourceData("foo")) -} - -func TestWatch(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := registrytest.New(t, storage.Store) - test.TestWatch( - validNewThirdPartyResourceData("foo"), - // matching labels - []labels.Set{}, - // not matching labels - []labels.Set{ - {"foo": "bar"}, - }, - // matching fields - []fields.Set{}, - // not matching fields - []fields.Set{ - {"metadata.name": "bar"}, - {"name": "foo"}, - }, - ) -} diff --git a/pkg/registry/extensions/thirdpartyresourcedata/strategy.go b/pkg/registry/extensions/thirdpartyresourcedata/strategy.go deleted file mode 100644 index ba3746d45c7..00000000000 --- a/pkg/registry/extensions/thirdpartyresourcedata/strategy.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package thirdpartyresourcedata - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation/field" - genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - "k8s.io/apiserver/pkg/registry/rest" - apistorage "k8s.io/apiserver/pkg/storage" - "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/extensions/validation" -) - -// strategy implements behavior for ThirdPartyResource objects -type strategy struct { - runtime.ObjectTyper - names.NameGenerator -} - -// Strategy is the default logic that applies when creating and updating ThirdPartyResource -// objects via the REST API. -var Strategy = strategy{api.Scheme, names.SimpleNameGenerator} - -var _ = rest.RESTCreateStrategy(Strategy) - -var _ = rest.RESTUpdateStrategy(Strategy) - -func (strategy) NamespaceScoped() bool { - return true -} - -func (strategy) PrepareForCreate(ctx genericapirequest.Context, obj runtime.Object) { -} - -func (strategy) Validate(ctx genericapirequest.Context, obj runtime.Object) field.ErrorList { - return validation.ValidateThirdPartyResourceData(obj.(*extensions.ThirdPartyResourceData)) -} - -// Canonicalize normalizes the object after validation. 
-func (strategy) Canonicalize(obj runtime.Object) { -} - -func (strategy) AllowCreateOnUpdate() bool { - return false -} - -func (strategy) PrepareForUpdate(ctx genericapirequest.Context, obj, old runtime.Object) { -} - -func (strategy) ValidateUpdate(ctx genericapirequest.Context, obj, old runtime.Object) field.ErrorList { - return validation.ValidateThirdPartyResourceDataUpdate(obj.(*extensions.ThirdPartyResourceData), old.(*extensions.ThirdPartyResourceData)) -} - -func (strategy) AllowUnconditionalUpdate() bool { - return true -} - -// GetAttrs returns labels and fields of a given object for filtering purposes. -func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, bool, error) { - tprd, ok := obj.(*extensions.ThirdPartyResourceData) - if !ok { - return nil, nil, false, fmt.Errorf("not a ThirdPartyResourceData") - } - return labels.Set(tprd.Labels), SelectableFields(tprd), tprd.Initializers != nil, nil -} - -// Matcher returns a generic matcher for a given label and field selector. -func Matcher(label labels.Selector, field fields.Selector) apistorage.SelectionPredicate { - return apistorage.SelectionPredicate{ - Label: label, - Field: field, - GetAttrs: GetAttrs, - } -} - -// SelectableFields returns a field set that can be used for filter selection -func SelectableFields(obj *extensions.ThirdPartyResourceData) fields.Set { - return nil -} diff --git a/pkg/registry/extensions/thirdpartyresourcedata/strategy_test.go b/pkg/registry/extensions/thirdpartyresourcedata/strategy_test.go deleted file mode 100644 index 52079c0bb0e..00000000000 --- a/pkg/registry/extensions/thirdpartyresourcedata/strategy_test.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package thirdpartyresourcedata - -import ( - "testing" - - _ "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/testapi" - apitesting "k8s.io/kubernetes/pkg/api/testing" - "k8s.io/kubernetes/pkg/apis/extensions" -) - -func TestSelectableFieldLabelConversions(t *testing.T) { - apitesting.TestSelectableFieldLabelConversionsOfKind(t, - testapi.Extensions.GroupVersion().String(), - "ThirdPartyResourceData", - SelectableFields(&extensions.ThirdPartyResourceData{}), - nil, - ) -} diff --git a/pkg/registry/extensions/thirdpartyresourcedata/util.go b/pkg/registry/extensions/thirdpartyresourcedata/util.go deleted file mode 100644 index 294760e5bf5..00000000000 --- a/pkg/registry/extensions/thirdpartyresourcedata/util.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package thirdpartyresourcedata - -import ( - "fmt" - "strings" - - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/apis/extensions" -) - -func ExtractGroupVersionKind(list *extensions.ThirdPartyResourceList) ([]schema.GroupVersion, []schema.GroupVersionKind, error) { - gvs := []schema.GroupVersion{} - gvks := []schema.GroupVersionKind{} - for ix := range list.Items { - rsrc := &list.Items[ix] - kind, group, err := ExtractApiGroupAndKind(rsrc) - if err != nil { - return nil, nil, err - } - for _, version := range rsrc.Versions { - gv := schema.GroupVersion{Group: group, Version: version.Name} - gvs = append(gvs, gv) - gvks = append(gvks, schema.GroupVersionKind{Group: group, Version: version.Name, Kind: kind}) - } - } - return gvs, gvks, nil -} - -func convertToCamelCase(input string) string { - result := "" - toUpper := true - for ix := range input { - char := input[ix] - if toUpper { - result = result + string([]byte{(char - 32)}) - toUpper = false - } else if char == '-' { - toUpper = true - } else { - result = result + string([]byte{char}) - } - } - return result -} - -func ExtractApiGroupAndKind(rsrc *extensions.ThirdPartyResource) (kind string, group string, err error) { - parts := strings.Split(rsrc.Name, ".") - if len(parts) < 3 { - return "", "", fmt.Errorf("unexpectedly short resource name: %s, expected at least ..", rsrc.Name) - } - return convertToCamelCase(parts[0]), strings.Join(parts[1:], "."), nil -} diff --git a/pkg/registry/extensions/thirdpartyresourcedata/util_test.go b/pkg/registry/extensions/thirdpartyresourcedata/util_test.go deleted file mode 100644 index 025cb55f80c..00000000000 --- a/pkg/registry/extensions/thirdpartyresourcedata/util_test.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package thirdpartyresourcedata - -import ( - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/apis/extensions" -) - -func TestExtractAPIGroupAndKind(t *testing.T) { - tests := []struct { - input string - expectedKind string - expectedGroup string - expectErr bool - }{ - { - input: "foo.company.com", - expectedKind: "Foo", - expectedGroup: "company.com", - }, - { - input: "cron-tab.company.com", - expectedKind: "CronTab", - expectedGroup: "company.com", - }, - { - input: "foo", - expectErr: true, - }, - } - - for _, test := range tests { - kind, group, err := ExtractApiGroupAndKind(&extensions.ThirdPartyResource{ObjectMeta: metav1.ObjectMeta{Name: test.input}}) - if err != nil && !test.expectErr { - t.Errorf("unexpected error: %v", err) - continue - } - if err == nil && test.expectErr { - t.Errorf("unexpected non-error") - continue - } - if kind != test.expectedKind { - t.Errorf("expected: %s, saw: %s", test.expectedKind, kind) - } - if group != test.expectedGroup { - t.Errorf("expected: %s, saw: %s", test.expectedGroup, group) - } - } -} diff --git a/pkg/registry/rbac/reconciliation/BUILD b/pkg/registry/rbac/reconciliation/BUILD index 36a5cc05d91..7ffc2828e0c 100644 --- a/pkg/registry/rbac/reconciliation/BUILD +++ b/pkg/registry/rbac/reconciliation/BUILD @@ -37,6 +37,7 @@ go_library( deps = [ "//pkg/api:go_default_library", "//pkg/apis/rbac:go_default_library", + "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", 
"//pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion:go_default_library", "//pkg/registry/rbac/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/pkg/registry/rbac/reconciliation/role_interfaces.go b/pkg/registry/rbac/reconciliation/role_interfaces.go index b3bc3c882b0..9cabec62318 100644 --- a/pkg/registry/rbac/reconciliation/role_interfaces.go +++ b/pkg/registry/rbac/reconciliation/role_interfaces.go @@ -17,8 +17,11 @@ limitations under the License. package reconciliation import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" + core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion" ) @@ -59,7 +62,8 @@ func (o RoleRuleOwner) SetRules(in []rbac.PolicyRule) { } type RoleModifier struct { - Client internalversion.RolesGetter + Client internalversion.RolesGetter + NamespaceClient core.NamespaceInterface } func (c RoleModifier) Get(namespace, name string) (RuleOwner, error) { @@ -71,6 +75,11 @@ func (c RoleModifier) Get(namespace, name string) (RuleOwner, error) { } func (c RoleModifier) Create(in RuleOwner) (RuleOwner, error) { + ns := &api.Namespace{ObjectMeta: metav1.ObjectMeta{Name: in.GetNamespace()}} + if _, err := c.NamespaceClient.Create(ns); err != nil && !apierrors.IsAlreadyExists(err) { + return nil, err + } + ret, err := c.Client.Roles(in.GetNamespace()).Create(in.(RoleRuleOwner).Role) if err != nil { return nil, err diff --git a/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go b/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go index 6ec14ab2dbf..fde4b1e67b7 100644 --- a/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go +++ b/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go @@ -17,9 
+17,12 @@ limitations under the License. package reconciliation import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" + core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion" ) @@ -68,7 +71,8 @@ func (o RoleBindingAdapter) SetSubjects(in []rbac.Subject) { } type RoleBindingClientAdapter struct { - Client internalversion.RoleBindingsGetter + Client internalversion.RoleBindingsGetter + NamespaceClient core.NamespaceInterface } func (c RoleBindingClientAdapter) Get(namespace, name string) (RoleBinding, error) { @@ -80,6 +84,11 @@ func (c RoleBindingClientAdapter) Get(namespace, name string) (RoleBinding, erro } func (c RoleBindingClientAdapter) Create(in RoleBinding) (RoleBinding, error) { + ns := &api.Namespace{ObjectMeta: metav1.ObjectMeta{Name: in.GetNamespace()}} + if _, err := c.NamespaceClient.Create(ns); err != nil && !apierrors.IsAlreadyExists(err) { + return nil, err + } + ret, err := c.Client.RoleBindings(in.GetNamespace()).Create(in.(RoleBindingAdapter).RoleBinding) if err != nil { return nil, err diff --git a/pkg/registry/rbac/rest/BUILD b/pkg/registry/rbac/rest/BUILD index 05f9b470f72..38d373335f6 100644 --- a/pkg/registry/rbac/rest/BUILD +++ b/pkg/registry/rbac/rest/BUILD @@ -14,6 +14,7 @@ go_library( deps = [ "//pkg/api:go_default_library", "//pkg/apis/rbac:go_default_library", + "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion:go_default_library", "//pkg/client/retry:go_default_library", "//pkg/registry/rbac/clusterrole:go_default_library", diff --git a/pkg/registry/rbac/rest/storage_rbac.go b/pkg/registry/rbac/rest/storage_rbac.go index 
76cf37a135f..cdb38b5328b 100644 --- a/pkg/registry/rbac/rest/storage_rbac.go +++ b/pkg/registry/rbac/rest/storage_rbac.go @@ -36,6 +36,7 @@ import ( serverstorage "k8s.io/apiserver/pkg/server/storage" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" + coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" rbacclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion" "k8s.io/kubernetes/pkg/client/retry" "k8s.io/kubernetes/pkg/registry/rbac/clusterrole" @@ -134,6 +135,13 @@ func PostStartHook(hookContext genericapiserver.PostStartHookContext) error { // intializing roles is really important. On some e2e runs, we've seen cases where etcd is down when the server // starts, the roles don't initialize, and nothing works. err := wait.Poll(1*time.Second, 30*time.Second, func() (done bool, err error) { + + coreclientset, err := coreclient.NewForConfig(hookContext.LoopbackClientConfig) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to initialize client: %v", err)) + return false, nil + } + clientset, err := rbacclient.NewForConfig(hookContext.LoopbackClientConfig) if err != nil { utilruntime.HandleError(fmt.Errorf("unable to initialize client: %v", err)) @@ -212,7 +220,7 @@ func PostStartHook(hookContext genericapiserver.PostStartHookContext) error { for _, role := range roles { opts := reconciliation.ReconcileRoleOptions{ Role: reconciliation.RoleRuleOwner{Role: &role}, - Client: reconciliation.RoleModifier{Client: clientset}, + Client: reconciliation.RoleModifier{Client: clientset, NamespaceClient: coreclientset.Namespaces()}, Confirm: true, } err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { @@ -242,7 +250,7 @@ func PostStartHook(hookContext genericapiserver.PostStartHookContext) error { for _, roleBinding := range roleBindings { opts := reconciliation.ReconcileRoleBindingOptions{ RoleBinding: 
reconciliation.RoleBindingAdapter{RoleBinding: &roleBinding}, - Client: reconciliation.RoleBindingClientAdapter{Client: clientset}, + Client: reconciliation.RoleBindingClientAdapter{Client: clientset, NamespaceClient: coreclientset.Namespaces()}, Confirm: true, } err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { diff --git a/pkg/registry/registrytest/OWNERS b/pkg/registry/registrytest/OWNERS index ca660369ed4..2bfe9edfb7b 100755 --- a/pkg/registry/registrytest/OWNERS +++ b/pkg/registry/registrytest/OWNERS @@ -10,7 +10,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - pmorie diff --git a/pkg/security/podsecuritypolicy/provider.go b/pkg/security/podsecuritypolicy/provider.go index 67f2d0906cd..6587267d93c 100644 --- a/pkg/security/podsecuritypolicy/provider.go +++ b/pkg/security/podsecuritypolicy/provider.go @@ -18,6 +18,7 @@ package podsecuritypolicy import ( "fmt" + "strings" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/kubernetes/pkg/api" @@ -225,7 +226,7 @@ func (s *simpleProvider) ValidatePodSecurityContext(pod *api.Pod, fldPath *field allErrs = append(allErrs, s.strategies.SysctlsStrategy.Validate(pod)...) - // TODO(timstclair): ValidatePodSecurityContext should be renamed to ValidatePod since its scope + // TODO(tallclair): ValidatePodSecurityContext should be renamed to ValidatePod since its scope // is not limited to the PodSecurityContext. if len(pod.Spec.Volumes) > 0 && !psputil.PSPAllowsAllVolumes(s.psp) { allowedVolumes := psputil.FSTypeToStringSet(s.psp.Spec.Volumes) @@ -308,7 +309,7 @@ func (s *simpleProvider) hasInvalidHostPort(container *api.Container, fldPath *f allErrs := field.ErrorList{} for _, cp := range container.Ports { if cp.HostPort > 0 && !s.isValidHostPort(int(cp.HostPort)) { - detail := fmt.Sprintf("Host port %d is not allowed to be used. Allowed ports: %v", cp.HostPort, s.psp.Spec.HostPorts) + detail := fmt.Sprintf("Host port %d is not allowed to be used. 
Allowed ports: [%s]", cp.HostPort, hostPortRangesToString(s.psp.Spec.HostPorts)) allErrs = append(allErrs, field.Invalid(fldPath.Child("hostPort"), cp.HostPort, detail)) } } @@ -329,3 +330,19 @@ func (s *simpleProvider) isValidHostPort(port int) bool { func (s *simpleProvider) GetPSPName() string { return s.psp.Name } + +func hostPortRangesToString(ranges []extensions.HostPortRange) string { + formattedString := "" + if ranges != nil { + strRanges := []string{} + for _, r := range ranges { + if r.Min == r.Max { + strRanges = append(strRanges, fmt.Sprintf("%d", r.Min)) + } else { + strRanges = append(strRanges, fmt.Sprintf("%d-%d", r.Min, r.Max)) + } + } + formattedString = strings.Join(strRanges, ",") + } + return formattedString +} diff --git a/pkg/security/podsecuritypolicy/provider_test.go b/pkg/security/podsecuritypolicy/provider_test.go index 94e33340aee..767172ed512 100644 --- a/pkg/security/podsecuritypolicy/provider_test.go +++ b/pkg/security/podsecuritypolicy/provider_test.go @@ -463,7 +463,7 @@ func TestValidateContainerSecurityContextFailures(t *testing.T) { "failHostPortPSP": { pod: failHostPortPod, psp: defaultPSP(), - expectedError: "Host port 1 is not allowed to be used. Allowed ports: []", + expectedError: "Host port 1 is not allowed to be used. 
Allowed ports: []", }, "failReadOnlyRootFS - nil": { pod: defaultPod(), @@ -498,7 +498,7 @@ func TestValidateContainerSecurityContextFailures(t *testing.T) { continue } if !strings.Contains(errs[0].Error(), v.expectedError) { - t.Errorf("%s received unexpected error %v", k, errs) + t.Errorf("%s received unexpected error %v\nexpected: %s", k, errs, v.expectedError) } } } diff --git a/pkg/serviceaccount/jwt.go b/pkg/serviceaccount/jwt.go index 9135df00785..83efe5be0aa 100644 --- a/pkg/serviceaccount/jwt.go +++ b/pkg/serviceaccount/jwt.go @@ -290,6 +290,10 @@ func (j *jwtTokenAuthenticator) AuthenticateToken(token string) (user.Info, bool glog.V(4).Infof("Could not retrieve token %s/%s for service account %s/%s: %v", namespace, secretName, namespace, serviceAccountName, err) return nil, false, errors.New("Token has been invalidated") } + if secret.DeletionTimestamp != nil { + glog.V(4).Infof("Token is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) + return nil, false, errors.New("Token has been invalidated") + } if bytes.Compare(secret.Data[v1.ServiceAccountTokenKey], []byte(token)) != 0 { glog.V(4).Infof("Token contents no longer matches %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) return nil, false, errors.New("Token does not match server's copy") @@ -301,6 +305,10 @@ func (j *jwtTokenAuthenticator) AuthenticateToken(token string) (user.Info, bool glog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, serviceAccountName, err) return nil, false, err } + if serviceAccount.DeletionTimestamp != nil { + glog.V(4).Infof("Service account has been deleted %s/%s", namespace, serviceAccountName) + return nil, false, fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, serviceAccountName) + } if string(serviceAccount.UID) != serviceAccountUID { glog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, 
serviceAccountName, string(serviceAccount.UID), serviceAccountUID) return nil, false, fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, serviceAccountUID) diff --git a/pkg/util/BUILD b/pkg/util/BUILD index 6dcbb4086b3..a9f039df226 100644 --- a/pkg/util/BUILD +++ b/pkg/util/BUILD @@ -13,7 +13,6 @@ go_library( srcs = [ "doc.go", "template.go", - "umask.go", "util.go", ], tags = ["automanaged"], @@ -48,24 +47,19 @@ filegroup( "//pkg/util/bandwidth:all-srcs", "//pkg/util/config:all-srcs", "//pkg/util/configz:all-srcs", - "//pkg/util/crlf:all-srcs", "//pkg/util/dbus:all-srcs", "//pkg/util/ebtables:all-srcs", "//pkg/util/env:all-srcs", - "//pkg/util/errors:all-srcs", "//pkg/util/exec:all-srcs", "//pkg/util/flock:all-srcs", - "//pkg/util/framer:all-srcs", "//pkg/util/goroutinemap:all-srcs", "//pkg/util/hash:all-srcs", "//pkg/util/i18n:all-srcs", "//pkg/util/initsystem:all-srcs", "//pkg/util/interrupt:all-srcs", - "//pkg/util/intstr:all-srcs", "//pkg/util/io:all-srcs", "//pkg/util/ipconfig:all-srcs", "//pkg/util/iptables:all-srcs", - "//pkg/util/json:all-srcs", "//pkg/util/keymutex:all-srcs", "//pkg/util/labels:all-srcs", "//pkg/util/limitwriter:all-srcs", @@ -79,13 +73,10 @@ filegroup( "//pkg/util/oom:all-srcs", "//pkg/util/parsers:all-srcs", "//pkg/util/procfs:all-srcs", - "//pkg/util/rand:all-srcs", "//pkg/util/removeall:all-srcs", "//pkg/util/resourcecontainer:all-srcs", "//pkg/util/rlimit:all-srcs", - "//pkg/util/runtime:all-srcs", "//pkg/util/selinux:all-srcs", - "//pkg/util/sets:all-srcs", "//pkg/util/slice:all-srcs", "//pkg/util/strings:all-srcs", "//pkg/util/sysctl:all-srcs", @@ -95,12 +86,8 @@ filegroup( "//pkg/util/term:all-srcs", "//pkg/util/threading:all-srcs", "//pkg/util/tolerations:all-srcs", - "//pkg/util/uuid:all-srcs", - "//pkg/util/validation:all-srcs", "//pkg/util/version:all-srcs", - "//pkg/util/wait:all-srcs", "//pkg/util/workqueue/prometheus:all-srcs", - "//pkg/util/yaml:all-srcs", ], tags = ["automanaged"], ) diff 
--git a/pkg/util/ebtables/ebtables_test.go b/pkg/util/ebtables/ebtables_test.go index 37d5b5ad654..91f6feeb652 100644 --- a/pkg/util/ebtables/ebtables_test.go +++ b/pkg/util/ebtables/ebtables_test.go @@ -23,7 +23,7 @@ import ( "k8s.io/kubernetes/pkg/util/exec" ) -func testEnsureChain(t *testing.T) { +func TestEnsureChain(t *testing.T) { fcmd := exec.FakeCmd{ CombinedOutputScript: []exec.FakeCombinedOutputAction{ // Does not Exists @@ -75,7 +75,7 @@ func testEnsureChain(t *testing.T) { } } -func testEnsureRule(t *testing.T) { +func TestEnsureRule(t *testing.T) { fcmd := exec.FakeCmd{ CombinedOutputScript: []exec.FakeCombinedOutputAction{ // Exists @@ -118,7 +118,7 @@ Bridge chain: TEST, entries: 0, policy: ACCEPT`), nil if exists { t.Errorf("expected exists = false") } - errStr := "Failed to ensure rule: exist 2, output: " + errStr := "Failed to ensure rule: exit 2, output: " if err == nil || err.Error() != errStr { t.Errorf("expected error: %q", errStr) } diff --git a/pkg/util/i18n/i18n.go b/pkg/util/i18n/i18n.go index d0335d6fbb2..aac3a304958 100644 --- a/pkg/util/i18n/i18n.go +++ b/pkg/util/i18n/i18n.go @@ -49,12 +49,12 @@ var knownTranslations = map[string][]string{ func loadSystemLanguage() string { langStr := os.Getenv("LANG") if langStr == "" { - glog.V(3).Infof("Couldn't find the LANG environment variable, defaulting to en-US") + glog.V(3).Infof("Couldn't find the LANG environment variable, defaulting to en_US") return "default" } pieces := strings.Split(langStr, ".") - if len(pieces) == 0 { - glog.V(3).Infof("Unexpected system language (%s), defaulting to en-US", langStr) + if len(pieces) != 2 { + glog.V(3).Infof("Unexpected system language (%s), defaulting to en_US", langStr) return "default" } return pieces[0] diff --git a/pkg/util/iptables/BUILD b/pkg/util/iptables/BUILD index a8a1868cddb..c688e303ba5 100644 --- a/pkg/util/iptables/BUILD +++ b/pkg/util/iptables/BUILD @@ -24,6 +24,7 @@ go_library( "//vendor/github.com/godbus/dbus:go_default_library", 
"//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", ], diff --git a/pkg/util/iptables/iptables.go b/pkg/util/iptables/iptables.go index b6c08fa378d..ecd59a476ff 100644 --- a/pkg/util/iptables/iptables.go +++ b/pkg/util/iptables/iptables.go @@ -338,7 +338,7 @@ func (runner *runner) RestoreAll(data []byte, flush FlushFlag, counters RestoreC } type iptablesLocker interface { - Close() + Close() error } // restoreInternal is the shared part of Restore/RestoreAll @@ -361,7 +361,11 @@ func (runner *runner) restoreInternal(args []string, data []byte, flush FlushFla if err != nil { return err } - defer locker.Close() + defer func(locker iptablesLocker) { + if err := locker.Close(); err != nil { + glog.Errorf("Failed to close iptables locks: %v", err) + } + }(locker) } // run the command and return the output or an error including the output and error diff --git a/pkg/util/iptables/iptables_linux.go b/pkg/util/iptables/iptables_linux.go index 4f614cb523d..c28fd62dda8 100644 --- a/pkg/util/iptables/iptables_linux.go +++ b/pkg/util/iptables/iptables_linux.go @@ -25,6 +25,7 @@ import ( "time" "golang.org/x/sys/unix" + utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/wait" ) @@ -33,13 +34,19 @@ type locker struct { lock14 *net.UnixListener } -func (l *locker) Close() { +func (l *locker) Close() error { + errList := []error{} if l.lock16 != nil { - l.lock16.Close() + if err := l.lock16.Close(); err != nil { + errList = append(errList, err) + } } if l.lock14 != nil { - l.lock14.Close() + if err := l.lock14.Close(); err != nil { + errList = append(errList, err) + } } + return utilerrors.NewAggregate(errList) } func grabIptablesLocks(lockfilePath string) (iptablesLocker, error) { diff --git 
a/pkg/util/iptables/iptables_test.go b/pkg/util/iptables/iptables_test.go index 824cede9127..c7b3fbb5981 100644 --- a/pkg/util/iptables/iptables_test.go +++ b/pkg/util/iptables/iptables_test.go @@ -333,7 +333,7 @@ func TestEnsureRuleErrorCreating(t *testing.T) { } } -func TestDeleteRuleAlreadyExists(t *testing.T) { +func TestDeleteRuleDoesNotExist(t *testing.T) { fcmd := exec.FakeCmd{ CombinedOutputScript: []exec.FakeCombinedOutputAction{ // iptables version check @@ -368,7 +368,7 @@ func TestDeleteRuleAlreadyExists(t *testing.T) { } } -func TestDeleteRuleNew(t *testing.T) { +func TestDeleteRuleExists(t *testing.T) { fcmd := exec.FakeCmd{ CombinedOutputScript: []exec.FakeCombinedOutputAction{ // iptables version check @@ -438,7 +438,7 @@ func TestDeleteRuleErrorChecking(t *testing.T) { } } -func TestDeleteRuleErrorCreating(t *testing.T) { +func TestDeleteRuleErrorDeleting(t *testing.T) { fcmd := exec.FakeCmd{ CombinedOutputScript: []exec.FakeCombinedOutputAction{ // iptables version check diff --git a/pkg/util/mount/fake.go b/pkg/util/mount/fake.go index 972bff26a10..2b71fa0a728 100644 --- a/pkg/util/mount/fake.go +++ b/pkg/util/mount/fake.go @@ -124,6 +124,14 @@ func (f *FakeMounter) List() ([]MountPoint, error) { return f.MountPoints, nil } +func (f *FakeMounter) IsMountPointMatch(mp MountPoint, dir string) bool { + return (mp.Path == dir) +} + +func (f *FakeMounter) IsNotMountPoint(dir string) (bool, error) { + return IsNotMountPoint(f, dir) +} + func (f *FakeMounter) IsLikelyNotMountPoint(file string) (bool, error) { f.mutex.Lock() defer f.mutex.Unlock() diff --git a/pkg/util/mount/mount.go b/pkg/util/mount/mount.go index 44058042d50..0c458d64b49 100644 --- a/pkg/util/mount/mount.go +++ b/pkg/util/mount/mount.go @@ -44,8 +44,21 @@ type Interface interface { // it could change between chunked reads). This is guaranteed to be // consistent. List() ([]MountPoint, error) - // IsLikelyNotMountPoint determines if a directory is a mountpoint. 
+ // IsMountPointMatch determines if the mountpoint matches the dir + IsMountPointMatch(mp MountPoint, dir string) bool + // IsNotMountPoint determines if a directory is a mountpoint. // It should return ErrNotExist when the directory does not exist. + // IsNotMountPoint is more expensive than IsLikelyNotMountPoint. + // IsNotMountPoint detects bind mounts in linux. + // IsNotMountPoint enumerates all the mountpoints using List() and + // the list of mountpoints may be large, then it uses + // IsMountPointMatch to evaluate whether the directory is a mountpoint + IsNotMountPoint(file string) (bool, error) + // IsLikelyNotMountPoint uses heuristics to determine if a directory + // is a mountpoint. + // It should return ErrNotExist when the directory does not exist. + // IsLikelyNotMountPoint does NOT properly detect all mountpoint types + // most notably linux bind mounts. IsLikelyNotMountPoint(file string) (bool, error) // DeviceOpened determines if the device is in use elsewhere // on the system, i.e. still mounted. @@ -199,3 +212,34 @@ func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (str return path.Base(mountPath), nil } + +// IsNotMountPoint determines if a directory is a mountpoint. +// It should return ErrNotExist when the directory does not exist. 
+// This method uses the List() of all mountpoints +// It is more extensive than IsLikelyNotMountPoint +// and it detects bind mounts in linux +func IsNotMountPoint(mounter Interface, file string) (bool, error) { + // IsLikelyNotMountPoint provides a quick check + // to determine whether file IS A mountpoint + notMnt, notMntErr := mounter.IsLikelyNotMountPoint(file) + if notMntErr != nil { + return notMnt, notMntErr + } + // identified as mountpoint, so return this fact + if notMnt == false { + return notMnt, nil + } + // check all mountpoints since IsLikelyNotMountPoint + // is not reliable for some mountpoint types + mountPoints, mountPointsErr := mounter.List() + if mountPointsErr != nil { + return notMnt, mountPointsErr + } + for _, mp := range mountPoints { + if mounter.IsMountPointMatch(mp, file) { + notMnt = false + break + } + } + return notMnt, nil +} diff --git a/pkg/util/mount/mount_linux.go b/pkg/util/mount/mount_linux.go index 1685ecb48f0..4c141ad5b0a 100644 --- a/pkg/util/mount/mount_linux.go +++ b/pkg/util/mount/mount_linux.go @@ -161,6 +161,15 @@ func (*Mounter) List() ([]MountPoint, error) { return listProcMounts(procMountsPath) } +func (mounter *Mounter) IsMountPointMatch(mp MountPoint, dir string) bool { + deletedDir := fmt.Sprintf("%s\\040(deleted)", dir) + return ((mp.Path == dir) || (mp.Path == deletedDir)) +} + +func (mounter *Mounter) IsNotMountPoint(dir string) (bool, error) { + return IsNotMountPoint(mounter, dir) +} + // IsLikelyNotMountPoint determines if a directory is not a mountpoint. // It is fast but not necessarily ALWAYS correct. If the path is in fact // a bind mount from one part of a mount to another it will not be detected. @@ -168,10 +177,6 @@ func (*Mounter) List() ([]MountPoint, error) { // will return true. When in fact /tmp/b is a mount point. If this situation // if of interest to you, don't use this function... 
func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) { - return IsNotMountPoint(file) -} - -func IsNotMountPoint(file string) (bool, error) { stat, err := os.Stat(file) if err != nil { return true, err diff --git a/pkg/util/mount/mount_unsupported.go b/pkg/util/mount/mount_unsupported.go index f9abab813dc..632ad0606ee 100644 --- a/pkg/util/mount/mount_unsupported.go +++ b/pkg/util/mount/mount_unsupported.go @@ -34,6 +34,14 @@ func (mounter *Mounter) List() ([]MountPoint, error) { return []MountPoint{}, nil } +func (mounter *Mounter) IsMountPointMatch(mp MountPoint, dir string) bool { + return (mp.Path == dir) +} + +func (mounter *Mounter) IsNotMountPoint(dir string) (bool, error) { + return IsNotMountPoint(mounter, dir) +} + func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) { return true, nil } @@ -57,7 +65,3 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, func (mounter *SafeFormatAndMount) diskLooksUnformatted(disk string) (bool, error) { return true, nil } - -func IsNotMountPoint(file string) (bool, error) { - return true, nil -} diff --git a/pkg/util/mount/nsenter_mount.go b/pkg/util/mount/nsenter_mount.go index f3a4afc1b0b..4af8ef0d82e 100644 --- a/pkg/util/mount/nsenter_mount.go +++ b/pkg/util/mount/nsenter_mount.go @@ -19,6 +19,7 @@ limitations under the License. package mount import ( + "fmt" "os" "path/filepath" "strings" @@ -162,6 +163,15 @@ func (*NsenterMounter) List() ([]MountPoint, error) { return listProcMounts(hostProcMountsPath) } +func (m *NsenterMounter) IsNotMountPoint(dir string) (bool, error) { + return IsNotMountPoint(m, dir) +} + +func (*NsenterMounter) IsMountPointMatch(mp MountPoint, dir string) bool { + deletedDir := fmt.Sprintf("%s\\040(deleted)", dir) + return ((mp.Path == dir) || (mp.Path == deletedDir)) +} + // IsLikelyNotMountPoint determines whether a path is a mountpoint by calling findmnt // in the host's root mount namespace. 
func (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) { diff --git a/pkg/util/mount/nsenter_mount_unsupported.go b/pkg/util/mount/nsenter_mount_unsupported.go index dcf19edefd2..e955e1b781b 100644 --- a/pkg/util/mount/nsenter_mount_unsupported.go +++ b/pkg/util/mount/nsenter_mount_unsupported.go @@ -38,6 +38,14 @@ func (*NsenterMounter) List() ([]MountPoint, error) { return []MountPoint{}, nil } +func (m *NsenterMounter) IsNotMountPoint(dir string) (bool, error) { + return IsNotMountPoint(m, dir) +} + +func (*NsenterMounter) IsMountPointMatch(mp MountPoint, dir string) bool { + return (mp.Path == dir) +} + func (*NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) { return true, nil } diff --git a/pkg/util/net/BUILD b/pkg/util/net/BUILD index 3ee4e871b0e..0c027b3bd46 100644 --- a/pkg/util/net/BUILD +++ b/pkg/util/net/BUILD @@ -2,17 +2,6 @@ package(default_visibility = ["//visibility:public"]) licenses(["notice"]) -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["doc.go"], - tags = ["automanaged"], -) - filegroup( name = "package-srcs", srcs = glob(["**"]), diff --git a/pkg/util/net/sets/ipnet.go b/pkg/util/net/sets/ipnet.go index 5b6fe933f1c..90ad58c6354 100644 --- a/pkg/util/net/sets/ipnet.go +++ b/pkg/util/net/sets/ipnet.go @@ -21,8 +21,10 @@ import ( "strings" ) +// IPNet maps string to net.IPNet. type IPNet map[string]*net.IPNet +// ParseIPNets parses string slice to IPNet. func ParseIPNets(specs ...string) (IPNet, error) { ipnetset := make(IPNet) for _, spec := range specs { @@ -96,9 +98,9 @@ func (s IPNet) StringSlice() []string { } // IsSuperset returns true if and only if s1 is a superset of s2. 
-func (s1 IPNet) IsSuperset(s2 IPNet) bool { +func (s IPNet) IsSuperset(s2 IPNet) bool { for k := range s2 { - _, found := s1[k] + _, found := s[k] if !found { return false } @@ -109,8 +111,8 @@ func (s1 IPNet) IsSuperset(s2 IPNet) bool { // Equal returns true if and only if s1 is equal (as a set) to s2. // Two sets are equal if their membership is identical. // (In practice, this means same elements, order doesn't matter) -func (s1 IPNet) Equal(s2 IPNet) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) +func (s IPNet) Equal(s2 IPNet) bool { + return len(s) == len(s2) && s.IsSuperset(s2) } // Len returns the size of the set. diff --git a/pkg/util/rand/BUILD b/pkg/util/rand/BUILD deleted file mode 100644 index deefdcef3c1..00000000000 --- a/pkg/util/rand/BUILD +++ /dev/null @@ -1,27 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["doc.go"], - tags = ["automanaged"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/pkg/util/removeall/removeall_test.go b/pkg/util/removeall/removeall_test.go index 938ef08e358..a5b19fe41a2 100644 --- a/pkg/util/removeall/removeall_test.go +++ b/pkg/util/removeall/removeall_test.go @@ -49,6 +49,12 @@ func (mounter *fakeMounter) PathIsDevice(pathname string) (bool, error) { func (mounter *fakeMounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) { return "", errors.New("not implemented") } +func (mounter *fakeMounter) IsMountPointMatch(mp mount.MountPoint, dir string) bool { + return (mp.Path == dir) +} +func (mounter *fakeMounter) IsNotMountPoint(dir string) (bool, error) { + return mount.IsNotMountPoint(mounter, dir) +} func (mounter *fakeMounter) 
IsLikelyNotMountPoint(file string) (bool, error) { name := path.Base(file) if strings.HasPrefix(name, "mount") { diff --git a/pkg/util/runtime/BUILD b/pkg/util/runtime/BUILD deleted file mode 100644 index deefdcef3c1..00000000000 --- a/pkg/util/runtime/BUILD +++ /dev/null @@ -1,27 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["doc.go"], - tags = ["automanaged"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/pkg/util/runtime/doc.go b/pkg/util/runtime/doc.go deleted file mode 100644 index d9aeba6c9c5..00000000000 --- a/pkg/util/runtime/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package runtime only exists until heapster rebases -// TODO genericapiserver remove this empty package. Godep fails without this because heapster relies -// on this package. This will allow us to start splitting packages, but will force -// heapster to update on their next kube rebase. 
-package runtime diff --git a/pkg/util/sets/doc.go b/pkg/util/sets/doc.go deleted file mode 100644 index 5fe6f4f9eb1..00000000000 --- a/pkg/util/sets/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package sets only exists until heapster rebases -// TODO genericapiserver remove this empty package. Godep fails without this because heapster relies -// on this package. This will allow us to start splitting packages, but will force -// heapster to update on their next kube rebase. -package sets diff --git a/pkg/util/sets/types/BUILD b/pkg/util/sets/types/BUILD deleted file mode 100644 index deefdcef3c1..00000000000 --- a/pkg/util/sets/types/BUILD +++ /dev/null @@ -1,27 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["doc.go"], - tags = ["automanaged"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/pkg/util/sets/types/doc.go b/pkg/util/sets/types/doc.go deleted file mode 100644 index 5fe6f4f9eb1..00000000000 --- a/pkg/util/sets/types/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package sets only exists until heapster rebases -// TODO genericapiserver remove this empty package. Godep fails without this because heapster relies -// on this package. This will allow us to start splitting packages, but will force -// heapster to update on their next kube rebase. -package sets diff --git a/pkg/util/slice/slice.go b/pkg/util/slice/slice.go index 1b8f67c0c17..b408dbae841 100644 --- a/pkg/util/slice/slice.go +++ b/pkg/util/slice/slice.go @@ -26,6 +26,9 @@ import ( // CopyStrings copies the contents of the specified string slice // into a new slice. func CopyStrings(s []string) []string { + if s == nil { + return nil + } c := make([]string, len(s)) copy(c, s) return c @@ -41,6 +44,9 @@ func SortStrings(s []string) []string { // ShuffleStrings copies strings from the specified slice into a copy in random // order. It returns a new slice. func ShuffleStrings(s []string) []string { + if s == nil { + return nil + } shuffled := make([]string, len(s)) perm := utilrand.Perm(len(s)) for i, j := range perm { @@ -62,14 +68,3 @@ func ContainsString(slice []string, s string, modifier func(s string) string) bo } return false } - -// Int64Slice attaches the methods of Interface to []int64, -// sorting in increasing order. 
-type Int64Slice []int64 - -func (p Int64Slice) Len() int { return len(p) } -func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// Sorts []int64 in increasing order -func SortInts64(a []int64) { sort.Sort(Int64Slice(a)) } diff --git a/pkg/util/slice/slice_test.go b/pkg/util/slice/slice_test.go index 7d74437df16..437c8ecee55 100644 --- a/pkg/util/slice/slice_test.go +++ b/pkg/util/slice/slice_test.go @@ -22,15 +22,29 @@ import ( ) func TestCopyStrings(t *testing.T) { - src := []string{"a", "c", "b"} - dest := CopyStrings(src) + var src1 []string + dest1 := CopyStrings(src1) - if !reflect.DeepEqual(src, dest) { - t.Errorf("%v and %v are not equal", src, dest) + if !reflect.DeepEqual(src1, dest1) { + t.Errorf("%v and %v are not equal", src1, dest1) } - src[0] = "A" - if reflect.DeepEqual(src, dest) { + src2 := []string{} + dest2 := CopyStrings(src2) + + if !reflect.DeepEqual(src2, dest2) { + t.Errorf("%v and %v are not equal", src2, dest2) + } + + src3 := []string{"a", "c", "b"} + dest3 := CopyStrings(src3) + + if !reflect.DeepEqual(src3, dest3) { + t.Errorf("%v and %v are not equal", src3, dest3) + } + + src3[0] = "A" + if reflect.DeepEqual(src3, dest3) { t.Errorf("CopyStrings didn't make a copy") } } @@ -50,9 +64,16 @@ func TestSortStrings(t *testing.T) { } func TestShuffleStrings(t *testing.T) { - src := []string{"a", "b", "c", "d", "e", "f"} + var src []string dest := ShuffleStrings(src) + if dest != nil { + t.Errorf("ShuffleStrings for a nil slice got a non-nil slice") + } + + src = []string{"a", "b", "c", "d", "e", "f"} + dest = ShuffleStrings(src) + if len(src) != len(dest) { t.Errorf("Shuffled slice is wrong length, expected %v got %v", len(src), len(dest)) } @@ -68,12 +89,3 @@ func TestShuffleStrings(t *testing.T) { } } } - -func TestSortInts64(t *testing.T) { - src := []int64{10, 1, 2, 3, 4, 5, 6} - expected := []int64{1, 2, 3, 4, 5, 6, 10} - SortInts64(src) - if 
!reflect.DeepEqual(src, expected) { - t.Errorf("func Ints64 didnt sort correctly, %v !- %v", src, expected) - } -} diff --git a/pkg/util/term/BUILD b/pkg/util/term/BUILD index e238ccfc48a..0182b46ed25 100644 --- a/pkg/util/term/BUILD +++ b/pkg/util/term/BUILD @@ -5,35 +5,18 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", - "go_test", ) go_library( name = "go_default_library", - srcs = [ - "resize.go", - "resizeevents.go", - "setsize.go", - "term.go", - "term_writer.go", - ], + srcs = ["setsize.go"], tags = ["automanaged"], deps = [ - "//pkg/util/interrupt:go_default_library", "//vendor/github.com/docker/docker/pkg/term:go_default_library", - "//vendor/github.com/mitchellh/go-wordwrap:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", ], ) -go_test( - name = "go_default_test", - srcs = ["term_writer_test.go"], - library = ":go_default_library", - tags = ["automanaged"], -) - filegroup( name = "package-srcs", srcs = glob(["**"]), diff --git a/pkg/util/util.go b/pkg/util/util.go index 356b295a3e1..389e145e849 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -84,6 +84,15 @@ func FileExists(filename string) (bool, error) { return true, nil } +func FileOrSymlinkExists(filename string) (bool, error) { + if _, err := os.Lstat(filename); os.IsNotExist(err) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} + // ReadDirNoStat returns a string of files/directories contained // in dirname without calling lstat on them. 
func ReadDirNoStat(dirname string) ([]string, error) { diff --git a/pkg/util/uuid/BUILD b/pkg/util/uuid/BUILD deleted file mode 100644 index deefdcef3c1..00000000000 --- a/pkg/util/uuid/BUILD +++ /dev/null @@ -1,27 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["doc.go"], - tags = ["automanaged"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/pkg/util/validation/BUILD b/pkg/util/validation/BUILD deleted file mode 100644 index e67c7910f31..00000000000 --- a/pkg/util/validation/BUILD +++ /dev/null @@ -1,30 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["doc.go"], - tags = ["automanaged"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/util/validation/field:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/pkg/util/validation/doc.go b/pkg/util/validation/doc.go deleted file mode 100644 index 345315c02d2..00000000000 --- a/pkg/util/validation/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package validation only exists until heapster rebases -// TODO genericapiserver remove this empty package. Godep fails without this because heapster relies -// on this package. This will allow us to start splitting packages, but will force -// heapster to update on their next kube rebase. -package validation diff --git a/pkg/util/validation/field/BUILD b/pkg/util/validation/field/BUILD deleted file mode 100644 index deefdcef3c1..00000000000 --- a/pkg/util/validation/field/BUILD +++ /dev/null @@ -1,27 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["doc.go"], - tags = ["automanaged"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/pkg/util/validation/field/doc.go b/pkg/util/validation/field/doc.go deleted file mode 100644 index 0421cbda632..00000000000 --- a/pkg/util/validation/field/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package field only exists until heapster rebases -// TODO genericapiserver remove this empty package. Godep fails without this because heapster relies -// on this package. This will allow us to start splitting packages, but will force -// heapster to update on their next kube rebase. -package field diff --git a/pkg/util/wait/BUILD b/pkg/util/wait/BUILD deleted file mode 100644 index deefdcef3c1..00000000000 --- a/pkg/util/wait/BUILD +++ /dev/null @@ -1,27 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["doc.go"], - tags = ["automanaged"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/pkg/util/wait/doc.go b/pkg/util/wait/doc.go deleted file mode 100644 index 748c3a18305..00000000000 --- a/pkg/util/wait/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package wait only exists until heapster rebases -// TODO genericapiserver remove this empty package. Godep fails without this because heapster relies -// on this package. This will allow us to start splitting packages, but will force -// heapster to update on their next kube rebase. -package wait diff --git a/pkg/util/yaml/BUILD b/pkg/util/yaml/BUILD deleted file mode 100644 index deefdcef3c1..00000000000 --- a/pkg/util/yaml/BUILD +++ /dev/null @@ -1,27 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["doc.go"], - tags = ["automanaged"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/pkg/util/yaml/doc.go b/pkg/util/yaml/doc.go deleted file mode 100644 index f046b9b4dfe..00000000000 --- a/pkg/util/yaml/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package yaml only exists until heapster rebases -// TODO genericapiserver remove this empty package. Godep fails without this because heapster relies -// on this package. 
This will allow us to start splitting packages, but will force -// heapster to update on their next kube rebase. -package yaml diff --git a/pkg/volume/OWNERS b/pkg/volume/OWNERS index bbac9263cdc..7d90f372be0 100644 --- a/pkg/volume/OWNERS +++ b/pkg/volume/OWNERS @@ -12,7 +12,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/azure_dd/BUILD b/pkg/volume/azure_dd/BUILD index b96669b020e..a60cb736fe8 100644 --- a/pkg/volume/azure_dd/BUILD +++ b/pkg/volume/azure_dd/BUILD @@ -12,12 +12,14 @@ go_library( name = "go_default_library", srcs = [ "attacher.go", + "azure_common.go", "azure_dd.go", + "azure_mounter.go", "azure_provision.go", - "vhd_util.go", ], tags = ["automanaged"], deps = [ + "//pkg/api:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/azure:go_default_library", "//pkg/util/exec:go_default_library", @@ -26,37 +28,18 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", ], ) -go_test( - name = "go_default_test", - srcs = [ - "azure_dd_test.go", - "vhd_util_test.go", - ], - library = ":go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/util/exec:go_default_library", - "//pkg/util/mount:go_default_library", - 
"//pkg/volume:go_default_library", - "//pkg/volume/testing:go_default_library", - "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/client-go/util/testing:go_default_library", - ], -) - filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -69,3 +52,20 @@ filegroup( srcs = [":package-srcs"], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = [ + "azure_common_test.go", + "azure_dd_test.go", + ], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//pkg/util/exec:go_default_library", + "//pkg/volume:go_default_library", + "//pkg/volume/testing:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/client-go/util/testing:go_default_library", + ], +) diff --git a/pkg/volume/azure_dd/OWNERS b/pkg/volume/azure_dd/OWNERS index 51f8d0a1076..5cfad1f70cd 100755 --- a/pkg/volume/azure_dd/OWNERS +++ b/pkg/volume/azure_dd/OWNERS @@ -10,7 +10,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/azure_dd/attacher.go b/pkg/volume/azure_dd/attacher.go index 92cb203c468..40c2a1bac88 100644 --- a/pkg/volume/azure_dd/attacher.go +++ b/pkg/volume/azure_dd/attacher.go @@ -26,54 +26,43 @@ import ( "github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/golang/glog" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" "k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util" + volumeutil "k8s.io/kubernetes/pkg/volume/util" ) +type azureDiskDetacher struct { + plugin *azureDataDiskPlugin + cloud *azure.Cloud +} + type 
azureDiskAttacher struct { - host volume.VolumeHost - azureProvider azureCloudProvider + plugin *azureDataDiskPlugin + cloud *azure.Cloud } var _ volume.Attacher = &azureDiskAttacher{} - -var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{} - -const ( - checkSleepDuration = time.Second -) +var _ volume.Detacher = &azureDiskDetacher{} // acquire lock to get an lun number var getLunMutex = keymutex.NewKeyMutex() -// NewAttacher initializes an Attacher -func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) { - azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider()) - if err != nil { - glog.V(4).Infof("failed to get azure provider") - return nil, err - } - - return &azureDiskAttacher{ - host: plugin.host, - azureProvider: azure, - }, nil -} - // Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN -func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { +func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { volumeSource, err := getVolumeSource(spec) if err != nil { glog.Warningf("failed to get azure disk spec") return "", err } - instanceid, err := attacher.azureProvider.InstanceID(nodeName) + + instanceid, err := a.cloud.InstanceID(nodeName) if err != nil { glog.Warningf("failed to get azure instance id") return "", fmt.Errorf("failed to get azure instance id for node %q", nodeName) @@ -82,7 +71,12 @@ func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.Node instanceid = instanceid[(ind + 1):] } - lun, err := attacher.azureProvider.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName) + diskController, err := getDiskController(a.plugin.host) + if err != nil { + return "", err + } + + lun, err := diskController.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName) if err == cloudprovider.InstanceNotFound { // Log error and continue with attach 
glog.Warningf( @@ -98,13 +92,14 @@ func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.Node getLunMutex.LockKey(instanceid) defer getLunMutex.UnlockKey(instanceid) - lun, err = attacher.azureProvider.GetNextDiskLun(nodeName) + lun, err = diskController.GetNextDiskLun(nodeName) if err != nil { glog.Warningf("no LUN available for instance %q", nodeName) return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q", volumeSource.DiskName, instanceid) } glog.V(4).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName) - err = attacher.azureProvider.AttachDisk(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode)) + isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) + err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode)) if err == nil { glog.V(4).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName) } else { @@ -116,14 +111,14 @@ func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.Node return strconv.Itoa(int(lun)), err } -func (attacher *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { +func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { volumesAttachedCheck := make(map[*volume.Spec]bool) volumeSpecMap := make(map[string]*volume.Spec) volumeIDList := []string{} for _, spec := range specs { volumeSource, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err) + glog.Errorf("azureDisk - Error getting volume (%q) source : %v", spec.Name(), err) continue } @@ -131,11 +126,16 @@ func (attacher *azureDiskAttacher) VolumesAreAttached(specs 
[]*volume.Spec, node volumesAttachedCheck[spec] = true volumeSpecMap[volumeSource.DiskName] = spec } - attachedResult, err := attacher.azureProvider.DisksAreAttached(volumeIDList, nodeName) + + diskController, err := getDiskController(a.plugin.host) + if err != nil { + return nil, err + } + attachedResult, err := diskController.DisksAreAttached(volumeIDList, nodeName) if err != nil { // Log error and continue with attach glog.Errorf( - "Error checking if volumes (%v) are attached to current node (%q). err=%v", + "azureDisk - Error checking if volumes (%v) are attached to current node (%q). err=%v", volumeIDList, nodeName, err) return volumesAttachedCheck, err } @@ -144,71 +144,84 @@ func (attacher *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, node if !attached { spec := volumeSpecMap[volumeID] volumesAttachedCheck[spec] = false - glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name()) + glog.V(2).Infof("azureDisk - VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name()) } } return volumesAttachedCheck, nil } -// WaitForAttach runs on the node to detect if the volume (referenced by LUN) is attached. 
If attached, the device path is returned -func (attacher *azureDiskAttacher) WaitForAttach(spec *volume.Spec, lunStr string, timeout time.Duration) (string, error) { +func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) { + var err error + lun, err := strconv.Atoi(devicePath) + if err != nil { + return "", fmt.Errorf("azureDisk - Wait for attach expect device path as a lun number, instead got: %s", devicePath) + } + volumeSource, err := getVolumeSource(spec) if err != nil { return "", err } - if len(lunStr) == 0 { - return "", fmt.Errorf("WaitForAttach failed for Azure disk %q: lun is empty.", volumeSource.DiskName) - } + io := &osIOHandler{} + scsiHostRescan(io) - lun, err := strconv.Atoi(lunStr) - if err != nil { - return "", fmt.Errorf("WaitForAttach: wrong lun %q, err: %v", lunStr, err) - } - scsiHostRescan(&osIOHandler{}) - exe := exec.New() - devicePath := "" + diskName := volumeSource.DiskName + nodeName := a.plugin.host.GetHostName() + newDevicePath := "" - err = wait.Poll(checkSleepDuration, timeout, func() (bool, error) { - glog.V(4).Infof("Checking Azure disk %q(lun %s) is attached.", volumeSource.DiskName, lunStr) - if devicePath, err = findDiskByLun(lun, &osIOHandler{}, exe); err == nil { - if len(devicePath) == 0 { - glog.Warningf("cannot find attached Azure disk %q(lun %s) locally.", volumeSource.DiskName, lunStr) - return false, fmt.Errorf("cannot find attached Azure disk %q(lun %s) locally.", volumeSource.DiskName, lunStr) - } - glog.V(4).Infof("Successfully found attached Azure disk %q(lun %s, device path %s).", volumeSource.DiskName, lunStr, devicePath) - return true, nil - } else { - //Log error, if any, and continue checking periodically - glog.V(4).Infof("Error Stat Azure disk (%q) is attached: %v", volumeSource.DiskName, err) - return false, nil + err = wait.Poll(1*time.Second, timeout, func() (bool, error) { + exe := exec.New() + + if newDevicePath, err = findDiskByLun(lun, 
io, exe); err != nil { + return false, fmt.Errorf("azureDisk - WaitForAttach ticker failed node (%s) disk (%s) lun(%v) err(%s)", nodeName, diskName, lun, err) } + + // did we find it? + if newDevicePath != "" { + // the curent sequence k8s uses for unformated disk (check-disk, mount, fail, mkfs.extX) hangs on + // Azure Managed disk scsi interface. this is a hack and will be replaced once we identify and solve + // the root case on Azure. + formatIfNotFormatted(newDevicePath, *volumeSource.FSType) + return true, nil + } + + return false, fmt.Errorf("azureDisk - WaitForAttach failed within timeout node (%s) diskId:(%s) lun:(%v)", nodeName, diskName, lun) }) - return devicePath, err + + return newDevicePath, err } -// GetDeviceMountPath finds the volume's mount path on the node -func (attacher *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) { +// to avoid name conflicts (similar *.vhd name) +// we use hash diskUri and we use it as device mount target. +// this is generalized for both managed and blob disks +// we also prefix the hash with m/b based on disk kind +func (a *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) { volumeSource, err := getVolumeSource(spec) if err != nil { return "", err } - return makeGlobalPDPath(attacher.host, volumeSource.DiskName), nil + if volumeSource.Kind == nil { // this spec was constructed from info on the node + pdPath := path.Join(a.plugin.host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, volumeSource.DataDiskURI) + return pdPath, nil + } + + isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) + return makeGlobalPDPath(a.plugin.host, volumeSource.DataDiskURI, isManagedDisk) } -// MountDevice runs mount command on the node to mount the volume func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error { - mounter := attacher.host.GetMounter() + mounter := attacher.plugin.host.GetMounter() notMnt, 
err := mounter.IsLikelyNotMountPoint(deviceMountPath) + if err != nil { if os.IsNotExist(err) { if err := os.MkdirAll(deviceMountPath, 0750); err != nil { - return err + return fmt.Errorf("azureDisk - mountDevice:CreateDirectory failed with %s", err) } notMnt = true } else { - return err + return fmt.Errorf("azureDisk - mountDevice:IsLikelyNotMountPoint failed with %s", err) } } @@ -218,47 +231,27 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str } options := []string{} - if spec.ReadOnly { - options = append(options, "ro") - } if notMnt { diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()} mountOptions := volume.MountOptionFromSpec(spec, options...) err = diskMounter.FormatAndMount(devicePath, deviceMountPath, *volumeSource.FSType, mountOptions) if err != nil { - os.Remove(deviceMountPath) - return err + if cleanErr := os.Remove(deviceMountPath); cleanErr != nil { + return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s and clean up failed with :%v", err, cleanErr) + } + return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s", err) } } return nil } -type azureDiskDetacher struct { - mounter mount.Interface - azureProvider azureCloudProvider -} - -var _ volume.Detacher = &azureDiskDetacher{} - -// NewDetacher initializes a volume Detacher -func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) { - azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider()) - if err != nil { - return nil, err - } - - return &azureDiskDetacher{ - mounter: plugin.host.GetMounter(), - azureProvider: azure, - }, nil -} - // Detach detaches disk from Azure VM. 
-func (detacher *azureDiskDetacher) Detach(diskName string, nodeName types.NodeName) error { - if diskName == "" { - return fmt.Errorf("invalid disk to detach: %q", diskName) +func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) error { + if diskURI == "" { + return fmt.Errorf("invalid disk to detach: %q", diskURI) } - instanceid, err := detacher.azureProvider.InstanceID(nodeName) + + instanceid, err := d.cloud.InstanceID(nodeName) if err != nil { glog.Warningf("no instance id for node %q, skip detaching", nodeName) return nil @@ -267,22 +260,28 @@ func (detacher *azureDiskDetacher) Detach(diskName string, nodeName types.NodeNa instanceid = instanceid[(ind + 1):] } - glog.V(4).Infof("detach %v from node %q", diskName, nodeName) - err = detacher.azureProvider.DetachDiskByName(diskName, "" /* diskURI */, nodeName) + glog.V(4).Infof("detach %v from node %q", diskURI, nodeName) + + diskController, err := getDiskController(d.plugin.host) if err != nil { - glog.Errorf("failed to detach azure disk %q, err %v", diskName, err) + return err + } + err = diskController.DetachDiskByName("", diskURI, nodeName) + if err != nil { + glog.Errorf("failed to detach azure disk %q, err %v", diskURI, err) } + glog.V(2).Infof("azureDisk - disk:%s was detached from node:%v", diskURI, nodeName) return err } // UnmountDevice unmounts the volume on the node func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error { - volume := path.Base(deviceMountPath) - if err := util.UnmountPath(deviceMountPath, detacher.mounter); err != nil { - glog.Errorf("Error unmounting %q: %v", volume, err) - return err + err := volumeutil.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter()) + if err == nil { + glog.V(4).Infof("azureDisk - Device %s was unmounted", deviceMountPath) } else { - return nil + glog.Infof("azureDisk - Device %s failed to unmount with error: %s", deviceMountPath, err.Error()) } + return err } diff --git 
a/pkg/volume/azure_dd/azure_common.go b/pkg/volume/azure_dd/azure_common.go new file mode 100644 index 00000000000..637b75a7959 --- /dev/null +++ b/pkg/volume/azure_dd/azure_common.go @@ -0,0 +1,342 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure_dd + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "regexp" + "strconv" + libstrings "strings" + + storage "github.com/Azure/azure-sdk-for-go/arm/storage" + "github.com/golang/glog" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" + "k8s.io/kubernetes/pkg/util/exec" + "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/util/strings" + "k8s.io/kubernetes/pkg/volume" +) + +const ( + defaultFSType = "ext4" + defaultStorageAccountType = storage.StandardLRS +) + +type dataDisk struct { + volume.MetricsProvider + volumeName string + diskName string + podUID types.UID +} + +var ( + supportedCachingModes = sets.NewString( + string(api.AzureDataDiskCachingNone), + string(api.AzureDataDiskCachingReadOnly), + string(api.AzureDataDiskCachingReadWrite)) + + supportedDiskKinds = sets.NewString( + string(api.AzureSharedBlobDisk), + string(api.AzureDedicatedBlobDisk), + string(api.AzureManagedDisk)) + + supportedStorageAccountTypes = sets.NewString("Premium_LRS", "Standard_LRS") +) + +func getPath(uid types.UID, volName string, host volume.VolumeHost) 
string { + return host.GetPodVolumeDir(uid, strings.EscapeQualifiedNameForDisk(azureDataDiskPluginName), volName) +} + +// creates a unique path for disks (even if they share the same *.vhd name) +func makeGlobalPDPath(host volume.VolumeHost, diskUri string, isManaged bool) (string, error) { + diskUri = libstrings.ToLower(diskUri) // always lower uri because users may enter it in caps. + uniqueDiskNameTemplate := "%s%s" + hashedDiskUri := azure.MakeCRC32(diskUri) + prefix := "b" + if isManaged { + prefix = "m" + } + // "{m for managed b for blob}{hashed diskUri or DiskId depending on disk kind }" + diskName := fmt.Sprintf(uniqueDiskNameTemplate, prefix, hashedDiskUri) + pdPath := path.Join(host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, diskName) + + return pdPath, nil +} + +func makeDataDisk(volumeName string, podUID types.UID, diskName string, host volume.VolumeHost) *dataDisk { + var metricProvider volume.MetricsProvider + if podUID != "" { + metricProvider = volume.NewMetricsStatFS(getPath(podUID, volumeName, host)) + } + + return &dataDisk{ + MetricsProvider: metricProvider, + volumeName: volumeName, + diskName: diskName, + podUID: podUID, + } +} + +func getVolumeSource(spec *volume.Spec) (*v1.AzureDiskVolumeSource, error) { + if spec.Volume != nil && spec.Volume.AzureDisk != nil { + return spec.Volume.AzureDisk, nil + } + + if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil { + return spec.PersistentVolume.Spec.AzureDisk, nil + } + + return nil, fmt.Errorf("azureDisk - Spec does not reference an Azure disk volume type") +} + +func normalizeFsType(fsType string) string { + if fsType == "" { + return defaultFSType + } + + return fsType +} + +func normalizeKind(kind string) (v1.AzureDataDiskKind, error) { + if kind == "" { + return v1.AzureDedicatedBlobDisk, nil + } + + if !supportedDiskKinds.Has(kind) { + return "", fmt.Errorf("azureDisk - %s is not supported disk kind. 
Supported values are %s", kind, supportedDiskKinds.List()) + } + + return v1.AzureDataDiskKind(kind), nil +} + +func normalizeStorageAccountType(storageAccountType string) (storage.SkuName, error) { + if storageAccountType == "" { + return defaultStorageAccountType, nil + } + + if !supportedStorageAccountTypes.Has(storageAccountType) { + return "", fmt.Errorf("azureDisk - %s is not supported sku/storageaccounttype. Supported values are %s", storageAccountType, supportedStorageAccountTypes.List()) + } + + return storage.SkuName(storageAccountType), nil +} + +func normalizeCachingMode(cachingMode v1.AzureDataDiskCachingMode) (v1.AzureDataDiskCachingMode, error) { + if cachingMode == "" { + return v1.AzureDataDiskCachingReadWrite, nil + } + + if !supportedCachingModes.Has(string(cachingMode)) { + return "", fmt.Errorf("azureDisk - %s is not supported cachingmode. Supported values are %s", cachingMode, supportedCachingModes.List()) + } + + return cachingMode, nil +} + +type ioHandler interface { + ReadDir(dirname string) ([]os.FileInfo, error) + WriteFile(filename string, data []byte, perm os.FileMode) error + Readlink(name string) (string, error) +} + +//TODO: check if priming the iscsi interface is actually needed + +type osIOHandler struct{} + +func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) { + return ioutil.ReadDir(dirname) +} + +func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error { + return ioutil.WriteFile(filename, data, perm) +} + +func (handler *osIOHandler) Readlink(name string) (string, error) { + return os.Readlink(name) +} + +// exclude those used by azure as resource and OS root in /dev/disk/azure +func listAzureDiskPath(io ioHandler) []string { + azureDiskPath := "/dev/disk/azure/" + var azureDiskList []string + if dirs, err := io.ReadDir(azureDiskPath); err == nil { + for _, f := range dirs { + name := f.Name() + diskPath := azureDiskPath + name + if link, linkErr := 
io.Readlink(diskPath); linkErr == nil { + sd := link[(libstrings.LastIndex(link, "/") + 1):] + azureDiskList = append(azureDiskList, sd) + } + } + } + glog.V(12).Infof("Azure sys disks paths: %v", azureDiskList) + return azureDiskList +} + +func scsiHostRescan(io ioHandler) { + scsi_path := "/sys/class/scsi_host/" + if dirs, err := io.ReadDir(scsi_path); err == nil { + for _, f := range dirs { + name := scsi_path + f.Name() + "/scan" + data := []byte("- - -") + if err = io.WriteFile(name, data, 0666); err != nil { + glog.Warningf("failed to rescan scsi host %s", name) + } + } + } else { + glog.Warningf("failed to read %s, err %v", scsi_path, err) + } +} + +func findDiskByLun(lun int, io ioHandler, exe exec.Interface) (string, error) { + azureDisks := listAzureDiskPath(io) + return findDiskByLunWithConstraint(lun, io, exe, azureDisks) +} + +// finds a device mounted to "current" node +func findDiskByLunWithConstraint(lun int, io ioHandler, exe exec.Interface, azureDisks []string) (string, error) { + var err error + sys_path := "/sys/bus/scsi/devices" + if dirs, err := io.ReadDir(sys_path); err == nil { + for _, f := range dirs { + name := f.Name() + // look for path like /sys/bus/scsi/devices/3:0:0:1 + arr := libstrings.Split(name, ":") + if len(arr) < 4 { + continue + } + // extract LUN from the path. + // LUN is the last index of the array, i.e. 
1 in /sys/bus/scsi/devices/3:0:0:1 + l, err := strconv.Atoi(arr[3]) + if err != nil { + // unknown path format, continue to read the next one + glog.V(4).Infof("azure disk - failed to parse lun from %v (%v), err %v", arr[3], name, err) + continue + } + if lun == l { + // find the matching LUN + // read vendor and model to ensure it is a VHD disk + vendor := path.Join(sys_path, name, "vendor") + model := path.Join(sys_path, name, "model") + out, err := exe.Command("cat", vendor, model).CombinedOutput() + if err != nil { + glog.V(4).Infof("azure disk - failed to cat device vendor and model, err: %v", err) + continue + } + matched, err := regexp.MatchString("^MSFT[ ]{0,}\nVIRTUAL DISK[ ]{0,}\n$", libstrings.ToUpper(string(out))) + if err != nil || !matched { + glog.V(4).Infof("azure disk - doesn't match VHD, output %v, error %v", string(out), err) + continue + } + // find a disk, validate name + dir := path.Join(sys_path, name, "block") + if dev, err := io.ReadDir(dir); err == nil { + found := false + for _, diskName := range azureDisks { + glog.V(12).Infof("azure disk - validating disk %q with sys disk %q", dev[0].Name(), diskName) + if string(dev[0].Name()) == diskName { + found = true + break + } + } + if !found { + return "/dev/" + dev[0].Name(), nil + } + } + } + } + } + return "", err +} + +func formatIfNotFormatted(disk string, fstype string) { + notFormatted, err := diskLooksUnformatted(disk) + if err == nil && notFormatted { + args := []string{disk} + // Disk is unformatted so format it. + // Use 'ext4' as the default + if len(fstype) == 0 { + fstype = "ext4" + } + if fstype == "ext4" || fstype == "ext3" { + args = []string{"-E", "lazy_itable_init=0,lazy_journal_init=0", "-F", disk} + } + glog.Infof("azureDisk - Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", disk, fstype, args) + runner := exec.New() + cmd := runner.Command("mkfs."+fstype, args...) 
+ _, err := cmd.CombinedOutput() + if err == nil { + // the disk has been formatted successfully try to mount it again. + glog.Infof("azureDisk - Disk successfully formatted (mkfs): %s - %s %s", fstype, disk, "tt") + } + glog.Warningf("azureDisk - format of disk %q failed: type:(%q) target:(%q) options:(%q)error:(%v)", disk, fstype, "tt", "o", err) + } else { + if err != nil { + glog.Warningf("azureDisk - Failed to check if the disk %s formatted with error %s, will attach anyway", disk, err) + } else { + glog.Infof("azureDisk - Disk %s already formatted, will not format", disk) + } + } +} + +func diskLooksUnformatted(disk string) (bool, error) { + args := []string{"-nd", "-o", "FSTYPE", disk} + runner := exec.New() + cmd := runner.Command("lsblk", args...) + glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, args) + dataOut, err := cmd.CombinedOutput() + if err != nil { + glog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err) + return false, err + } + output := libstrings.TrimSpace(string(dataOut)) + return output == "", nil +} + +func getDiskController(host volume.VolumeHost) (DiskController, error) { + cloudProvider := host.GetCloudProvider() + az, ok := cloudProvider.(*azure.Cloud) + + if !ok || az == nil { + return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider) + } + return az, nil +} + +func getCloud(host volume.VolumeHost) (*azure.Cloud, error) { + cloudProvider := host.GetCloudProvider() + az, ok := cloudProvider.(*azure.Cloud) + + if !ok || az == nil { + return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. 
GetCloudProvider returned %v instead", cloudProvider) + } + return az, nil +} + +func strFirstLetterToUpper(str string) string { + if len(str) < 2 { + return str + } + return libstrings.ToUpper(string(str[0])) + str[1:] +} diff --git a/pkg/volume/azure_dd/vhd_util_test.go b/pkg/volume/azure_dd/azure_common_test.go similarity index 98% rename from pkg/volume/azure_dd/vhd_util_test.go rename to pkg/volume/azure_dd/azure_common_test.go index 93c76721778..b0f4988a9e4 100644 --- a/pkg/volume/azure_dd/vhd_util_test.go +++ b/pkg/volume/azure_dd/azure_common_test.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/volume/azure_dd/azure_dd.go b/pkg/volume/azure_dd/azure_dd.go index 20464dc217a..49b68cdd43a 100644 --- a/pkg/volume/azure_dd/azure_dd.go +++ b/pkg/volume/azure_dd/azure_dd.go @@ -17,67 +17,62 @@ limitations under the License. package azure_dd import ( - "fmt" - "os" - "path" - "github.com/Azure/azure-sdk-for-go/arm/compute" - + storage "github.com/Azure/azure-sdk-for-go/arm/storage" "github.com/golang/glog" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" - "k8s.io/kubernetes/pkg/util/exec" - "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/mount" - utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util" ) -// This is the primary entrypoint for volume plugins. 
-func ProbeVolumePlugins() []volume.VolumePlugin { - return []volume.VolumePlugin{&azureDataDiskPlugin{}} -} +// interface exposed by the cloud provider implementing Disk functionality +type DiskController interface { + CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int, forceStandAlone bool) (string, error) + DeleteBlobDisk(diskUri string, wasForced bool) error -type azureDataDiskPlugin struct { - host volume.VolumeHost - volumeLocks keymutex.KeyMutex -} + CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error) + DeleteManagedDisk(diskURI string) error -// Abstract interface to disk operations. -// azure cloud provider should implement it -type azureCloudProvider interface { // Attaches the disk to the host machine. - AttachDisk(diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error + AttachDisk(isManagedDisk bool, diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error // Detaches the disk, identified by disk name or uri, from the host machine. DetachDiskByName(diskName, diskUri string, nodeName types.NodeName) error + // Check if a list of volumes are attached to the node with the specified NodeName DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) + // Get the LUN number of the disk that is attached to the host GetDiskLun(diskName, diskUri string, nodeName types.NodeName) (int32, error) // Get the next available LUN number to attach a new VHD GetNextDiskLun(nodeName types.NodeName) (int32, error) - // InstanceID returns the cloud provider ID of the specified instance. 
- InstanceID(nodeName types.NodeName) (string, error) + // Create a VHD blob - CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error) + CreateVolume(name, storageAccount string, storageAccountType storage.SkuName, location string, requestGB int) (string, string, int, error) // Delete a VHD blob - DeleteVolume(name, uri string) error + DeleteVolume(diskURI string) error +} + +type azureDataDiskPlugin struct { + host volume.VolumeHost } var _ volume.VolumePlugin = &azureDataDiskPlugin{} var _ volume.PersistentVolumePlugin = &azureDataDiskPlugin{} +var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{} +var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{} +var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{} const ( azureDataDiskPluginName = "kubernetes.io/azure-disk" ) +func ProbeVolumePlugins() []volume.VolumePlugin { + return []volume.VolumePlugin{&azureDataDiskPlugin{}} +} + func (plugin *azureDataDiskPlugin) Init(host volume.VolumeHost) error { plugin.host = host - plugin.volumeLocks = keymutex.NewKeyMutex() return nil } @@ -91,7 +86,7 @@ func (plugin *azureDataDiskPlugin) GetVolumeName(spec *volume.Spec) (string, err return "", err } - return volumeSource.DiskName, nil + return volumeSource.DataDiskURI, nil } func (plugin *azureDataDiskPlugin) CanSupport(spec *volume.Spec) bool { @@ -117,281 +112,104 @@ func (plugin *azureDataDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessM } } -func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { - return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter()) -} - -func (plugin *azureDataDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface) (volume.Mounter, error) { - // azures used directly in a pod have a ReadOnly flag set by the pod author. 
- // azures used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV - azure, err := getVolumeSource(spec) +// NewAttacher initializes an Attacher +func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) { + azure, err := getCloud(plugin.host) if err != nil { + glog.V(4).Infof("failed to get azure cloud in NewAttacher, plugin.host : %s", plugin.host.GetHostName()) return nil, err } - fsType := "ext4" - if azure.FSType != nil { - fsType = *azure.FSType - } - cachingMode := v1.AzureDataDiskCachingNone - if azure.CachingMode != nil { - cachingMode = *azure.CachingMode - } - readOnly := false - if azure.ReadOnly != nil { - readOnly = *azure.ReadOnly - } - diskName := azure.DiskName - diskUri := azure.DataDiskURI - return &azureDiskMounter{ - azureDisk: &azureDisk{ - podUID: podUID, - volName: spec.Name(), - diskName: diskName, - diskUri: diskUri, - cachingMode: cachingMode, - mounter: mounter, - plugin: plugin, - }, - fsType: fsType, - readOnly: readOnly, - diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil -} -func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { - return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter()) -} - -func (plugin *azureDataDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) { - return &azureDiskUnmounter{ - &azureDisk{ - podUID: podUID, - volName: volName, - mounter: mounter, - plugin: plugin, - }, + return &azureDiskAttacher{ + plugin: plugin, + cloud: azure, }, nil } -func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) { - mounter := plugin.host.GetMounter() - pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName()) - sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir) +func (plugin 
*azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) { + azure, err := getCloud(plugin.host) + if err != nil { + glog.V(4).Infof("failed to get azure cloud in NewDetacher, plugin.host : %s", plugin.host.GetHostName()) + return nil, err + } + + return &azureDiskDetacher{ + plugin: plugin, + cloud: azure, + }, nil +} + +func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { + volumeSource, err := getVolumeSource(spec) if err != nil { return nil, err } - azVolume := &v1.Volume{ - Name: volName, + + disk := makeDataDisk(spec.Name(), "", volumeSource.DiskName, plugin.host) + + return &azureDiskDeleter{ + spec: spec, + plugin: plugin, + dataDisk: disk, + }, nil +} + +func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { + if len(options.PVC.Spec.AccessModes) == 0 { + options.PVC.Spec.AccessModes = plugin.GetAccessModes() + } + + return &azureDiskProvisioner{ + plugin: plugin, + options: options, + }, nil +} + +func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, options volume.VolumeOptions) (volume.Mounter, error) { + volumeSource, err := getVolumeSource(spec) + if err != nil { + return nil, err + } + disk := makeDataDisk(spec.Name(), pod.UID, volumeSource.DiskName, plugin.host) + + return &azureDiskMounter{ + plugin: plugin, + spec: spec, + options: options, + dataDisk: disk, + }, nil +} + +func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { + disk := makeDataDisk(volName, podUID, "", plugin.host) + + return &azureDiskUnmounter{ + plugin: plugin, + dataDisk: disk, + }, nil +} + +func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { + mounter := plugin.host.GetMounter() + pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName()) + sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir) + + if err != nil { + return 
nil, err + } + + azureVolume := &v1.Volume{ + Name: volumeName, VolumeSource: v1.VolumeSource{ AzureDisk: &v1.AzureDiskVolumeSource{ - DiskName: sourceName, + DataDiskURI: sourceName, }, }, } - return volume.NewSpecFromVolume(azVolume), nil + return volume.NewSpecFromVolume(azureVolume), nil } func (plugin *azureDataDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) { - mounter := plugin.host.GetMounter() - return mount.GetMountRefs(mounter, deviceMountPath) -} - -type azureDisk struct { - volName string - podUID types.UID - diskName string - diskUri string - cachingMode v1.AzureDataDiskCachingMode - mounter mount.Interface - plugin *azureDataDiskPlugin - volume.MetricsNil -} - -type azureDiskMounter struct { - *azureDisk - // Filesystem type, optional. - fsType string - // Specifies whether the disk will be attached as read-only. - readOnly bool - // diskMounter provides the interface that is used to mount the actual block device. - diskMounter *mount.SafeFormatAndMount -} - -var _ volume.Mounter = &azureDiskMounter{} - -func (b *azureDiskMounter) GetAttributes() volume.Attributes { - return volume.Attributes{ - ReadOnly: b.readOnly, - Managed: !b.readOnly, - SupportsSELinux: true, - } -} - -// Checks prior to mount operations to verify that the required components (binaries, etc.) -// to mount the volume are available on the underlying node. -// If not, it returns an error -func (b *azureDiskMounter) CanMount() error { - return nil -} - -// SetUp attaches the disk and bind mounts to the volume path. -func (b *azureDiskMounter) SetUp(fsGroup *int64) error { - return b.SetUpAt(b.GetPath(), fsGroup) -} - -// SetUpAt attaches the disk and bind mounts to the volume path. -func (b *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { - b.plugin.volumeLocks.LockKey(b.diskName) - defer b.plugin.volumeLocks.UnlockKey(b.diskName) - - // TODO: handle failed mounts here. 
- notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("DataDisk set up: %s %v %v", dir, !notMnt, err) - if err != nil && !os.IsNotExist(err) { - glog.Errorf("IsLikelyNotMountPoint failed: %v", err) - return err - } - if !notMnt { - glog.V(4).Infof("%s is a mount point", dir) - return nil - } - - globalPDPath := makeGlobalPDPath(b.plugin.host, b.diskName) - - if err := os.MkdirAll(dir, 0750); err != nil { - glog.V(4).Infof("Could not create directory %s: %v", dir, err) - return err - } - - // Perform a bind mount to the full path to allow duplicate mounts of the same PD. - options := []string{"bind"} - if b.readOnly { - options = append(options, "ro") - } - err = b.mounter.Mount(globalPDPath, dir, "", options) - if err != nil { - notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) - if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) - return err - } - if !notMnt { - if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) - return err - } - notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) - if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) - return err - } - if !notMnt { - // This is very odd, we don't expect it. We'll try again next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). 
Will try again next sync loop.", dir) - return err - } - } - os.Remove(dir) - return err - } - - if !b.readOnly { - volume.SetVolumeOwnership(b, fsGroup) - } - glog.V(3).Infof("Azure disk volume %s mounted to %s", b.diskName, dir) - return nil -} - -func makeGlobalPDPath(host volume.VolumeHost, volume string) string { - return path.Join(host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, volume) -} - -func (azure *azureDisk) GetPath() string { - name := azureDataDiskPluginName - return azure.plugin.host.GetPodVolumeDir(azure.podUID, utilstrings.EscapeQualifiedNameForDisk(name), azure.volName) -} - -type azureDiskUnmounter struct { - *azureDisk -} - -var _ volume.Unmounter = &azureDiskUnmounter{} - -// Unmounts the bind mount, and detaches the disk only if the PD -// resource was the last reference to that disk on the kubelet. -func (c *azureDiskUnmounter) TearDown() error { - return c.TearDownAt(c.GetPath()) -} - -// Unmounts the bind mount, and detaches the disk only if the PD -// resource was the last reference to that disk on the kubelet. 
-func (c *azureDiskUnmounter) TearDownAt(dir string) error { - if pathExists, pathErr := util.PathExists(dir); pathErr != nil { - return fmt.Errorf("Error checking if path exists: %v", pathErr) - } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) - return nil - } - - notMnt, err := c.mounter.IsLikelyNotMountPoint(dir) - if err != nil { - glog.Errorf("Error checking if mountpoint %s: %v", dir, err) - return err - } - if notMnt { - glog.V(2).Info("Not mountpoint, deleting") - return os.Remove(dir) - } - // lock the volume (and thus wait for any concurrrent SetUpAt to finish) - c.plugin.volumeLocks.LockKey(c.diskName) - defer c.plugin.volumeLocks.UnlockKey(c.diskName) - refs, err := mount.GetMountRefs(c.mounter, dir) - if err != nil { - glog.Errorf("Error getting mountrefs for %s: %v", dir, err) - return err - } - if len(refs) == 0 { - glog.Errorf("Did not find pod-mount for %s during tear down", dir) - return fmt.Errorf("%s is not mounted", dir) - } - c.diskName = path.Base(refs[0]) - glog.V(4).Infof("Found volume %s mounted to %s", c.diskName, dir) - - // Unmount the bind-mount inside this pod - if err := c.mounter.Unmount(dir); err != nil { - glog.Errorf("Error unmounting dir %s %v", dir, err) - return err - } - notMnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir) - if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) - return err - } - if notMnt { - if err := os.Remove(dir); err != nil { - glog.Errorf("Error removing mountpoint %s %v", dir, err) - return err - } - } - return nil -} - -func getVolumeSource(spec *volume.Spec) (*v1.AzureDiskVolumeSource, error) { - if spec.Volume != nil && spec.Volume.AzureDisk != nil { - return spec.Volume.AzureDisk, nil - } - if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil { - return spec.PersistentVolume.Spec.AzureDisk, nil - } - - return nil, fmt.Errorf("Spec does not reference an Azure disk volume type") -} - -// 
Return cloud provider -func getAzureCloudProvider(cloudProvider cloudprovider.Interface) (azureCloudProvider, error) { - azureCloudProvider, ok := cloudProvider.(*azure.Cloud) - if !ok || azureCloudProvider == nil { - return nil, fmt.Errorf("Failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider) - } - - return azureCloudProvider, nil + m := plugin.host.GetMounter() + return mount.GetMountRefs(m, deviceMountPath) } diff --git a/pkg/volume/azure_dd/azure_dd_test.go b/pkg/volume/azure_dd/azure_dd_test.go index 4397347f126..59becdeeada 100644 --- a/pkg/volume/azure_dd/azure_dd_test.go +++ b/pkg/volume/azure_dd/azure_dd_test.go @@ -17,17 +17,11 @@ limitations under the License. package azure_dd import ( - "fmt" "os" - "path" "testing" - "github.com/Azure/azure-sdk-for-go/arm/compute" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" utiltesting "k8s.io/client-go/util/testing" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" ) @@ -57,121 +51,5 @@ func TestCanSupport(t *testing.T) { } } -const ( - fakeDiskName = "foo" - fakeDiskUri = "https://azure/vhds/bar.vhd" - fakeLun = 2 -) - -type fakeAzureProvider struct { -} - -func (fake *fakeAzureProvider) AttachDisk(diskName, diskUri, vmName string, lun int32, cachingMode compute.CachingTypes) error { - if diskName != fakeDiskName || diskUri != fakeDiskUri || lun != fakeLun { - return fmt.Errorf("wrong disk") - } - return nil - -} - -func (fake *fakeAzureProvider) DetachDiskByName(diskName, diskUri, vmName string) error { - if diskName != fakeDiskName || diskUri != fakeDiskUri { - return fmt.Errorf("wrong disk") - } - return nil -} -func (fake *fakeAzureProvider) GetDiskLun(diskName, diskUri, vmName string) (int32, error) { - return int32(fakeLun), nil -} - -func (fake *fakeAzureProvider) GetNextDiskLun(vmName string) (int32, error) { - return fakeLun, nil -} -func (fake *fakeAzureProvider) InstanceID(name string) 
(string, error) { - return "localhost", nil -} - -func (fake *fakeAzureProvider) CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error) { - return "", "", 0, fmt.Errorf("not implemented") -} - -func (fake *fakeAzureProvider) DeleteVolume(name, uri string) error { - return fmt.Errorf("not implemented") -} - -func TestPlugin(t *testing.T) { - tmpDir, err := utiltesting.MkTmpdir("azure_ddTest") - if err != nil { - t.Fatalf("can't make a temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - plugMgr := volume.VolumePluginMgr{} - plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil)) - - plug, err := plugMgr.FindPluginByName(azureDataDiskPluginName) - if err != nil { - t.Errorf("Can't find the plugin by name") - } - fs := "ext4" - ro := false - caching := v1.AzureDataDiskCachingNone - spec := &v1.Volume{ - Name: "vol1", - VolumeSource: v1.VolumeSource{ - AzureDisk: &v1.AzureDiskVolumeSource{ - DiskName: fakeDiskName, - DataDiskURI: fakeDiskUri, - FSType: &fs, - CachingMode: &caching, - ReadOnly: &ro, - }, - }, - } - mounter, err := plug.(*azureDataDiskPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}) - if err != nil { - t.Errorf("Failed to make a new Mounter: %v", err) - } - if mounter == nil { - t.Errorf("Got a nil Mounter") - } - volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~azure-disk/vol1") - path := mounter.GetPath() - if path != volPath { - t.Errorf("Got unexpected path: %s, should be %s", path, volPath) - } - - if err := mounter.SetUp(nil); err != nil { - t.Errorf("Expected success, got: %v", err) - } - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - t.Errorf("SetUp() failed, volume path not created: %s", path) - } else { - t.Errorf("SetUp() failed: %v", err) - } - } - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - t.Errorf("SetUp() failed, volume path not created: 
%s", path) - } else { - t.Errorf("SetUp() failed: %v", err) - } - } - - unmounter, err := plug.(*azureDataDiskPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{}) - if err != nil { - t.Errorf("Failed to make a new Unmounter: %v", err) - } - if unmounter == nil { - t.Errorf("Got a nil Unmounter") - } - - if err := unmounter.TearDown(); err != nil { - t.Errorf("Expected success, got: %v", err) - } - if _, err := os.Stat(path); err == nil { - t.Errorf("TearDown() failed, volume path still exists: %s", path) - } else if !os.IsNotExist(err) { - t.Errorf("SetUp() failed: %v", err) - } -} +// fakeAzureProvider type was removed because all functions were not used +// Testing mounting will require path calculation which depends on the cloud provider, which is faked in the above test. diff --git a/pkg/volume/azure_dd/azure_mounter.go b/pkg/volume/azure_dd/azure_mounter.go new file mode 100644 index 00000000000..eedb5535f75 --- /dev/null +++ b/pkg/volume/azure_dd/azure_mounter.go @@ -0,0 +1,184 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package azure_dd + +import ( + "fmt" + "os" + + "github.com/golang/glog" + "k8s.io/api/core/v1" + "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" +) + +type azureDiskMounter struct { + *dataDisk + spec *volume.Spec + plugin *azureDataDiskPlugin + options volume.VolumeOptions +} + +type azureDiskUnmounter struct { + *dataDisk + plugin *azureDataDiskPlugin +} + +var _ volume.Unmounter = &azureDiskUnmounter{} +var _ volume.Mounter = &azureDiskMounter{} + +func (m *azureDiskMounter) GetAttributes() volume.Attributes { + volumeSource, _ := getVolumeSource(m.spec) + return volume.Attributes{ + ReadOnly: *volumeSource.ReadOnly, + Managed: !*volumeSource.ReadOnly, + SupportsSELinux: true, + } +} + +func (m *azureDiskMounter) CanMount() error { + return nil +} + +func (m *azureDiskMounter) SetUp(fsGroup *int64) error { + return m.SetUpAt(m.GetPath(), fsGroup) +} + +func (m *azureDiskMounter) GetPath() string { + return getPath(m.dataDisk.podUID, m.dataDisk.volumeName, m.plugin.host) +} + +func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { + mounter := m.plugin.host.GetMounter() + volumeSource, err := getVolumeSource(m.spec) + + if err != nil { + glog.Infof("azureDisk - mounter failed to get volume source for spec %s", m.spec.Name()) + return err + } + + diskName := volumeSource.DiskName + mountPoint, err := mounter.IsLikelyNotMountPoint(dir) + + if err != nil && !os.IsNotExist(err) { + glog.Infof("azureDisk - cannot validate mount point for disk %s on %s %v", diskName, dir, err) + return err + } + if !mountPoint { + return fmt.Errorf("azureDisk - Not a mounting point for disk %s on %s", diskName, dir) + } + + if err := os.MkdirAll(dir, 0750); err != nil { + glog.Infof("azureDisk - mkdir failed on disk %s on dir: %s (%v)", diskName, dir, err) + return err + } + + options := []string{"bind"} + + if *volumeSource.ReadOnly { + options = append(options, "ro") + } + + glog.V(4).Infof("azureDisk - Attempting to mount %s on %s", 
diskName, dir) + isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) + globalPDPath, err := makeGlobalPDPath(m.plugin.host, volumeSource.DataDiskURI, isManagedDisk) + + if err != nil { + return err + } + + mountErr := mounter.Mount(globalPDPath, dir, *volumeSource.FSType, options) + // Everything in the following control flow is meant as an + // attempt cleanup a failed setupAt (bind mount) + if mountErr != nil { + glog.Infof("azureDisk - SetupAt:Mount disk:%s at dir:%s failed during mounting with error:%v, will attempt to clean up", diskName, dir, mountErr) + mountPoint, err := mounter.IsLikelyNotMountPoint(dir) + if err != nil { + return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint check failed for disk:%s on dir:%s with error %v original-mountErr:%v", diskName, dir, err, mountErr) + } + + if !mountPoint { + if err = mounter.Unmount(dir); err != nil { + return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup failed to unmount disk:%s on dir:%s with error:%v original-mountErr:%v", diskName, dir, err, mountErr) + } + mountPoint, err := mounter.IsLikelyNotMountPoint(dir) + if err != nil { + return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint for disk:%s on dir:%s check failed with error:%v original-mountErr:%v", diskName, dir, err, mountErr) + } + if !mountPoint { + // not cool. leave for next sync loop. + return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup disk %s is still mounted on %s during cleanup original-mountErr:%v, despite call to unmount(). 
Will try again next sync loop.", diskName, dir, mountErr) + } + } + + if err = os.Remove(dir); err != nil { + return fmt.Errorf("azureDisk - SetupAt:Mount:Failure error cleaning up (removing dir:%s) with error:%v original-mountErr:%v", dir, err, mountErr) + } + + glog.V(2).Infof("azureDisk - Mount of disk:%s on dir:%s failed with mount error:%v post failure clean up was completed", diskName, dir, err, mountErr) + return mountErr + } + + if !*volumeSource.ReadOnly { + volume.SetVolumeOwnership(m, fsGroup) + } + + glog.V(2).Infof("azureDisk - successfully mounted disk %s on %s", diskName, dir) + return nil +} + +func (u *azureDiskUnmounter) TearDown() error { + return u.TearDownAt(u.GetPath()) +} + +func (u *azureDiskUnmounter) TearDownAt(dir string) error { + if pathExists, pathErr := util.PathExists(dir); pathErr != nil { + return fmt.Errorf("Error checking if path exists: %v", pathErr) + } else if !pathExists { + glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) + return nil + } + + glog.V(4).Infof("azureDisk - TearDownAt: %s", dir) + mounter := u.plugin.host.GetMounter() + mountPoint, err := mounter.IsLikelyNotMountPoint(dir) + if err != nil { + return fmt.Errorf("azureDisk - TearDownAt: %s failed to do IsLikelyNotMountPoint %s", dir, err) + } + if mountPoint { + if err := os.Remove(dir); err != nil { + return fmt.Errorf("azureDisk - TearDownAt: %s failed to do os.Remove %s", dir, err) + } + } + if err := mounter.Unmount(dir); err != nil { + return fmt.Errorf("azureDisk - TearDownAt: %s failed to do mounter.Unmount %s", dir, err) + } + mountPoint, err = mounter.IsLikelyNotMountPoint(dir) + if err != nil { + return fmt.Errorf("azureDisk - TearTownAt:IsLikelyNotMountPoint check failed: %v", err) + } + + if mountPoint { + return os.Remove(dir) + } + + return fmt.Errorf("azureDisk - failed to un-bind-mount volume dir") +} + +func (u *azureDiskUnmounter) GetPath() string { + return getPath(u.dataDisk.podUID, u.dataDisk.volumeName, 
u.plugin.host) +} diff --git a/pkg/volume/azure_dd/azure_provision.go b/pkg/volume/azure_dd/azure_provision.go index 67d620ae928..e47da2402e8 100644 --- a/pkg/volume/azure_dd/azure_provision.go +++ b/pkg/volume/azure_dd/azure_provision.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,147 +20,182 @@ import ( "fmt" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) -var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{} -var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{} +type azureDiskProvisioner struct { + plugin *azureDataDiskPlugin + options volume.VolumeOptions +} type azureDiskDeleter struct { - *azureDisk - azureProvider azureCloudProvider -} - -func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { - azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider()) - if err != nil { - glog.V(4).Infof("failed to get azure provider") - return nil, err - } - - return plugin.newDeleterInternal(spec, azure) -} - -func (plugin *azureDataDiskPlugin) newDeleterInternal(spec *volume.Spec, azure azureCloudProvider) (volume.Deleter, error) { - if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk == nil { - return nil, fmt.Errorf("invalid PV spec") - } - diskName := spec.PersistentVolume.Spec.AzureDisk.DiskName - diskUri := spec.PersistentVolume.Spec.AzureDisk.DataDiskURI - return &azureDiskDeleter{ - azureDisk: &azureDisk{ - volName: spec.Name(), - diskName: diskName, - diskUri: diskUri, - plugin: plugin, - }, - azureProvider: azure, - }, nil -} - -func (plugin *azureDataDiskPlugin) 
NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { - azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider()) - if err != nil { - glog.V(4).Infof("failed to get azure provider") - return nil, err - } - if len(options.PVC.Spec.AccessModes) == 0 { - options.PVC.Spec.AccessModes = plugin.GetAccessModes() - } - return plugin.newProvisionerInternal(options, azure) -} - -func (plugin *azureDataDiskPlugin) newProvisionerInternal(options volume.VolumeOptions, azure azureCloudProvider) (volume.Provisioner, error) { - return &azureDiskProvisioner{ - azureDisk: &azureDisk{ - plugin: plugin, - }, - azureProvider: azure, - options: options, - }, nil -} - -var _ volume.Deleter = &azureDiskDeleter{} - -func (d *azureDiskDeleter) GetPath() string { - name := azureDataDiskPluginName - return d.plugin.host.GetPodVolumeDir(d.podUID, utilstrings.EscapeQualifiedNameForDisk(name), d.volName) -} - -func (d *azureDiskDeleter) Delete() error { - glog.V(4).Infof("deleting volume %s", d.diskUri) - return d.azureProvider.DeleteVolume(d.diskName, d.diskUri) -} - -type azureDiskProvisioner struct { - *azureDisk - azureProvider azureCloudProvider - options volume.VolumeOptions + *dataDisk + spec *volume.Spec + plugin *azureDataDiskPlugin } var _ volume.Provisioner = &azureDiskProvisioner{} +var _ volume.Deleter = &azureDiskDeleter{} -func (a *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) { - if !volume.AccessModesContainedInAll(a.plugin.GetAccessModes(), a.options.PVC.Spec.AccessModes) { - return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", a.options.PVC.Spec.AccessModes, a.plugin.GetAccessModes()) +func (d *azureDiskDeleter) GetPath() string { + return getPath(d.podUID, d.dataDisk.diskName, d.plugin.host) +} + +func (d *azureDiskDeleter) Delete() error { + volumeSource, err := getVolumeSource(d.spec) + if err != nil { + return err } - var sku, location, account string + diskController, err := 
getDiskController(d.plugin.host) + if err != nil { + return err + } + wasStandAlone := (*volumeSource.Kind != v1.AzureSharedBlobDisk) + managed := (*volumeSource.Kind == v1.AzureManagedDisk) + + if managed { + return diskController.DeleteManagedDisk(volumeSource.DataDiskURI) + } + + return diskController.DeleteBlobDisk(volumeSource.DataDiskURI, wasStandAlone) +} + +func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) { + if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) { + return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes()) + } + supportedModes := p.plugin.GetAccessModes() + + // perform static validation first + if p.options.PVC.Spec.Selector != nil { + return nil, fmt.Errorf("azureDisk - claim.Spec.Selector is not supported for dynamic provisioning on Azure disk") + } + + if len(p.options.PVC.Spec.AccessModes) > 1 { + return nil, fmt.Errorf("AzureDisk - multiple access modes are not supported on AzureDisk plugin") + } + + if len(p.options.PVC.Spec.AccessModes) == 1 { + if p.options.PVC.Spec.AccessModes[0] != supportedModes[0] { + return nil, fmt.Errorf("AzureDisk - mode %s is not supporetd by AzureDisk plugin supported mode is %s", p.options.PVC.Spec.AccessModes[0], supportedModes) + } + } + + var ( + location, account string + storageAccountType, fsType string + cachingMode v1.AzureDataDiskCachingMode + strKind string + err error + ) // maxLength = 79 - (4 for ".vhd") = 75 - name := volume.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 75) - capacity := a.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] + name := volume.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75) + capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] requestBytes := capacity.Value() requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024)) - 
// Apply ProvisionerParameters (case-insensitive). We leave validation of - // the values to the cloud provider. - for k, v := range a.options.Parameters { + for k, v := range p.options.Parameters { switch strings.ToLower(k) { case "skuname": - sku = v + storageAccountType = v case "location": location = v case "storageaccount": account = v + case "storageaccounttype": + storageAccountType = v + case "kind": + strKind = v + case "cachingmode": + cachingMode = v1.AzureDataDiskCachingMode(v) + case "fstype": + fsType = strings.ToLower(v) default: - return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, a.plugin.GetPluginName()) + return nil, fmt.Errorf("AzureDisk - invalid option %s in storage class", k) } } - // TODO: implement c.options.ProvisionerSelector parsing - if a.options.PVC.Spec.Selector != nil { - return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Azure disk") - } - diskName, diskUri, sizeGB, err := a.azureProvider.CreateVolume(name, account, sku, location, requestGB) + // normalize values + fsType = normalizeFsType(fsType) + skuName, err := normalizeStorageAccountType(storageAccountType) if err != nil { return nil, err } + kind, err := normalizeKind(strFirstLetterToUpper(strKind)) + if err != nil { + return nil, err + } + + if cachingMode, err = normalizeCachingMode(cachingMode); err != nil { + return nil, err + } + + diskController, err := getDiskController(p.plugin.host) + if err != nil { + return nil, err + } + + // create disk + diskURI := "" + if kind == v1.AzureManagedDisk { + diskURI, err = diskController.CreateManagedDisk(name, skuName, requestGB, *(p.options.CloudTags)) + if err != nil { + return nil, err + } + } else { + forceStandAlone := (kind == v1.AzureDedicatedBlobDisk) + if kind == v1.AzureDedicatedBlobDisk { + if location != "" && account != "" { + // use dedicated kind (by default) for compatibility + _, diskURI, _, err = diskController.CreateVolume(name, account, skuName, location, 
requestGB) + if err != nil { + return nil, err + } + } else { + if location != "" || account != "" { + return nil, fmt.Errorf("AzureDisk - location(%s) and account(%s) must be both empty or specified for dedicated kind, only one value specified is not allowed", + location, account) + } + diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB, forceStandAlone) + if err != nil { + return nil, err + } + } + } else { + diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB, forceStandAlone) + if err != nil { + return nil, err + } + } + } + + pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ - Name: a.options.PVName, + Name: p.options.PVName, Labels: map[string]string{}, Annotations: map[string]string{ - volumehelper.VolumeDynamicallyCreatedByKey: "azure-disk-dynamic-provisioner", + volumehelper.VolumeDynamicallyCreatedByKey: "azure-disk-dynamic-provisioner", }, }, Spec: v1.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: a.options.PersistentVolumeReclaimPolicy, - AccessModes: a.options.PVC.Spec.AccessModes, + PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy, + AccessModes: supportedModes, Capacity: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)), + v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGB)), }, PersistentVolumeSource: v1.PersistentVolumeSource{ AzureDisk: &v1.AzureDiskVolumeSource{ - DiskName: diskName, - DataDiskURI: diskUri, + CachingMode: &cachingMode, + DiskName: name, + DataDiskURI: diskURI, + Kind: &kind, + FSType: &fsType, }, }, }, diff --git a/pkg/volume/azure_dd/vhd_util.go b/pkg/volume/azure_dd/vhd_util.go deleted file mode 100644 index 8db5093b76f..00000000000 --- a/pkg/volume/azure_dd/vhd_util.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package azure_dd - -import ( - "io/ioutil" - "os" - "path" - "regexp" - "strconv" - "strings" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/util/exec" -) - -type ioHandler interface { - ReadDir(dirname string) ([]os.FileInfo, error) - WriteFile(filename string, data []byte, perm os.FileMode) error - Readlink(name string) (string, error) -} - -type osIOHandler struct{} - -func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) { - return ioutil.ReadDir(dirname) -} -func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error { - return ioutil.WriteFile(filename, data, perm) -} -func (handler *osIOHandler) Readlink(name string) (string, error) { - return os.Readlink(name) -} - -// exclude those used by azure as resource and OS root in /dev/disk/azure -func listAzureDiskPath(io ioHandler) []string { - azureDiskPath := "/dev/disk/azure/" - var azureDiskList []string - if dirs, err := io.ReadDir(azureDiskPath); err == nil { - for _, f := range dirs { - name := f.Name() - diskPath := azureDiskPath + name - if link, linkErr := io.Readlink(diskPath); linkErr == nil { - sd := link[(strings.LastIndex(link, "/") + 1):] - azureDiskList = append(azureDiskList, sd) - } - } - } - glog.V(12).Infof("Azure sys disks paths: %v", azureDiskList) - return azureDiskList -} - -// given a LUN find the VHD device path like /dev/sdd -// exclude those disks used by Azure resources and OS root -func 
findDiskByLun(lun int, io ioHandler, exe exec.Interface) (string, error) { - azureDisks := listAzureDiskPath(io) - return findDiskByLunWithConstraint(lun, io, exe, azureDisks) -} - -// look for device /dev/sdX and validate it is a VHD -// return empty string if no disk is found -func findDiskByLunWithConstraint(lun int, io ioHandler, exe exec.Interface, azureDisks []string) (string, error) { - var err error - sys_path := "/sys/bus/scsi/devices" - if dirs, err := io.ReadDir(sys_path); err == nil { - for _, f := range dirs { - name := f.Name() - // look for path like /sys/bus/scsi/devices/3:0:0:1 - arr := strings.Split(name, ":") - if len(arr) < 4 { - continue - } - // extract LUN from the path. - // LUN is the last index of the array, i.e. 1 in /sys/bus/scsi/devices/3:0:0:1 - l, err := strconv.Atoi(arr[3]) - if err != nil { - // unknown path format, continue to read the next one - glog.Errorf("failed to parse lun from %v (%v), err %v", arr[3], name, err) - continue - } - if lun == l { - // find the matching LUN - // read vendor and model to ensure it is a VHD disk - vendor := path.Join(sys_path, name, "vendor") - model := path.Join(sys_path, name, "model") - out, err := exe.Command("cat", vendor, model).CombinedOutput() - if err != nil { - glog.Errorf("failed to cat device vendor and model, err: %v", err) - continue - } - matched, err := regexp.MatchString("^MSFT[ ]{0,}\nVIRTUAL DISK[ ]{0,}\n$", strings.ToUpper(string(out))) - if err != nil || !matched { - glog.V(4).Infof("doesn't match VHD, output %v, error %v", string(out), err) - continue - } - // find a disk, validate name - dir := path.Join(sys_path, name, "block") - if dev, err := io.ReadDir(dir); err == nil { - found := false - for _, diskName := range azureDisks { - glog.V(12).Infof("validating disk %q with sys disk %q", dev[0].Name(), diskName) - if string(dev[0].Name()) == diskName { - found = true - break - } - } - if !found { - return "/dev/" + dev[0].Name(), nil - } - } - } - } - } - return "", err -} - 
-// rescan scsi bus -func scsiHostRescan(io ioHandler) { - scsi_path := "/sys/class/scsi_host/" - if dirs, err := io.ReadDir(scsi_path); err == nil { - for _, f := range dirs { - name := scsi_path + f.Name() + "/scan" - data := []byte("- - -") - if err = io.WriteFile(name, data, 0666); err != nil { - glog.Errorf("failed to rescan scsi host %s", name) - } - } - } else { - glog.Errorf("failed to read %s, err %v", scsi_path, err) - } -} diff --git a/pkg/volume/azure_file/OWNERS b/pkg/volume/azure_file/OWNERS index 51f8d0a1076..5cfad1f70cd 100644 --- a/pkg/volume/azure_file/OWNERS +++ b/pkg/volume/azure_file/OWNERS @@ -10,7 +10,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/azure_file/azure_provision.go b/pkg/volume/azure_file/azure_provision.go index 9972c528214..a74fbc2cefa 100644 --- a/pkg/volume/azure_file/azure_provision.go +++ b/pkg/volume/azure_file/azure_provision.go @@ -138,7 +138,9 @@ func (a *azureFileProvisioner) Provision() (*v1.PersistentVolume, error) { var sku, location, account string - name := volume.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 75) + // File share name has a length limit of 63, and it cannot contain two consecutive '-'s. 
+ name := volume.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 63) + name = strings.Replace(name, "--", "-", -1) capacity := a.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] requestBytes := capacity.Value() requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024)) diff --git a/pkg/volume/cephfs/OWNERS b/pkg/volume/cephfs/OWNERS index 510c76e9db3..bda61e5c932 100644 --- a/pkg/volume/cephfs/OWNERS +++ b/pkg/volume/cephfs/OWNERS @@ -10,7 +10,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/cinder/OWNERS b/pkg/volume/cinder/OWNERS index 96361ff64e4..b8bd178391f 100644 --- a/pkg/volume/cinder/OWNERS +++ b/pkg/volume/cinder/OWNERS @@ -10,7 +10,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/configmap/OWNERS b/pkg/volume/configmap/OWNERS index b8f17f6f38f..54ffe3c5748 100644 --- a/pkg/volume/configmap/OWNERS +++ b/pkg/volume/configmap/OWNERS @@ -12,7 +12,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/configmap/configmap.go b/pkg/volume/configmap/configmap.go index bab372e244d..af05aef1145 100644 --- a/pkg/volume/configmap/configmap.go +++ b/pkg/volume/configmap/configmap.go @@ -266,9 +266,7 @@ func MakePayload(mappings []v1.KeyToPath, configMap *v1.ConfigMap, defaultMode * if optional { continue } - err_msg := "references non-existent config key" - glog.Errorf(err_msg) - return nil, fmt.Errorf(err_msg) + return nil, fmt.Errorf("configmap references non-existent config key: %s", ktp.Key) } fileProjection.Data = []byte(content) diff --git a/pkg/volume/downwardapi/OWNERS b/pkg/volume/downwardapi/OWNERS index b8f17f6f38f..54ffe3c5748 100644 --- a/pkg/volume/downwardapi/OWNERS +++ b/pkg/volume/downwardapi/OWNERS @@ -12,7 +12,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- 
bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/empty_dir/OWNERS b/pkg/volume/empty_dir/OWNERS index b8f17f6f38f..54ffe3c5748 100644 --- a/pkg/volume/empty_dir/OWNERS +++ b/pkg/volume/empty_dir/OWNERS @@ -12,7 +12,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/empty_dir/empty_dir.go b/pkg/volume/empty_dir/empty_dir.go index c0cfc02ceab..2cb1aba212f 100644 --- a/pkg/volume/empty_dir/empty_dir.go +++ b/pkg/volume/empty_dir/empty_dir.go @@ -234,8 +234,7 @@ func (ed *emptyDir) SetUpAt(dir string, fsGroup *int64) error { return err } -// setupTmpfs creates a tmpfs mount at the specified directory with the -// specified SELinux context. +// setupTmpfs creates a tmpfs mount at the specified directory. func (ed *emptyDir) setupTmpfs(dir string) error { if ed.mounter == nil { return fmt.Errorf("memory storage requested, but mounter is nil") @@ -258,8 +257,7 @@ func (ed *emptyDir) setupTmpfs(dir string) error { return ed.mounter.Mount("tmpfs", dir, "tmpfs", nil /* options */) } -// setupDir creates the directory with the specified SELinux context and -// the default permissions specified by the perm constant. +// setupDir creates the directory with the default permissions specified by the perm constant. func (ed *emptyDir) setupDir(dir string) error { // Create the directory if it doesn't already exist. 
if err := os.MkdirAll(dir, perm); err != nil { diff --git a/pkg/volume/fc/OWNERS b/pkg/volume/fc/OWNERS index 0c721272070..ad0eff1fc62 100644 --- a/pkg/volume/fc/OWNERS +++ b/pkg/volume/fc/OWNERS @@ -7,7 +7,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/flexvolume/OWNERS b/pkg/volume/flexvolume/OWNERS index 34c7f918d9b..7ac8bb2f613 100644 --- a/pkg/volume/flexvolume/OWNERS +++ b/pkg/volume/flexvolume/OWNERS @@ -11,7 +11,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/flexvolume/driver-call.go b/pkg/volume/flexvolume/driver-call.go index 86644fc21ba..4640c51d8bb 100644 --- a/pkg/volume/flexvolume/driver-call.go +++ b/pkg/volume/flexvolume/driver-call.go @@ -144,7 +144,7 @@ func (dc *DriverCall) Run() (*DriverStatus, error) { if isCmdNotSupportedErr(err) { dc.plugin.unsupported(dc.Command) } else { - glog.Warningf("FlexVolume: driver call failed: executable: %s, args: %s, error: %s, output: %s", execPath, dc.args, execErr.Error(), output) + glog.Warningf("FlexVolume: driver call failed: executable: %s, args: %s, error: %s, output: %q", execPath, dc.args, execErr.Error(), output) } return nil, err } @@ -222,7 +222,7 @@ func isCmdNotSupportedErr(err error) bool { func handleCmdResponse(cmd string, output []byte) (*DriverStatus, error) { var status DriverStatus if err := json.Unmarshal(output, &status); err != nil { - glog.Errorf("Failed to unmarshal output for command: %s, output: %s, error: %s", cmd, string(output), err.Error()) + glog.Errorf("Failed to unmarshal output for command: %s, output: %q, error: %s", cmd, string(output), err.Error()) return nil, err } else if status.Status == StatusNotSupported { glog.V(5).Infof("%s command is not supported by the driver", cmd) diff --git a/pkg/volume/flocker/OWNERS b/pkg/volume/flocker/OWNERS index b0a585eec10..663ed96d5bb 100644 --- a/pkg/volume/flocker/OWNERS +++ 
b/pkg/volume/flocker/OWNERS @@ -9,7 +9,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/gce_pd/OWNERS b/pkg/volume/gce_pd/OWNERS index 3c1271befda..1f138a15291 100644 --- a/pkg/volume/gce_pd/OWNERS +++ b/pkg/volume/gce_pd/OWNERS @@ -4,7 +4,6 @@ approvers: reviewers: - thockin - smarterclayton -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/git_repo/OWNERS b/pkg/volume/git_repo/OWNERS index 7954c84a9a4..dba0b14ec9e 100644 --- a/pkg/volume/git_repo/OWNERS +++ b/pkg/volume/git_repo/OWNERS @@ -7,7 +7,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/glusterfs/OWNERS b/pkg/volume/glusterfs/OWNERS index 4271b66de44..89a204b4b90 100644 --- a/pkg/volume/glusterfs/OWNERS +++ b/pkg/volume/glusterfs/OWNERS @@ -8,7 +8,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index bbeef798713..9bdb6cce03b 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -344,24 +344,23 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { return nil } - // Give a try without `auto_unmount mount option, because - // it could be that gluster fuse client is older version and - // mount.glusterfs is unaware of `auto_unmount`. - // Use a mount string without `auto_unmount`` - - autoMountOptions := make([]string, len(mountOptions)) - for _, opt := range mountOptions { - if opt != "auto_unmount" { - autoMountOptions = append(autoMountOptions, opt) + const invalidOption = "Invalid option auto_unmount" + if dstrings.Contains(errs.Error(), invalidOption) { + // Give a try without `auto_unmount` mount option, because + // it could be that gluster fuse client is older version and + // mount.glusterfs is unaware of `auto_unmount`. 
+ noAutoMountOptions := make([]string, len(mountOptions)) + for _, opt := range mountOptions { + if opt != "auto_unmount" { + noAutoMountOptions = append(noAutoMountOptions, opt) + } + } + errs = b.mounter.Mount(ip+":"+b.path, dir, "glusterfs", noAutoMountOptions) + if errs == nil { + glog.Infof("glusterfs: successfully mounted %s", dir) + return nil } } - - autoErr := b.mounter.Mount(ip+":"+b.path, dir, "glusterfs", autoMountOptions) - if autoErr == nil { - glog.Infof("glusterfs: successfully mounted %s", dir) - return nil - } - } // Failed mount scenario. diff --git a/pkg/volume/host_path/OWNERS b/pkg/volume/host_path/OWNERS index d99a7a678bc..3b57899265f 100644 --- a/pkg/volume/host_path/OWNERS +++ b/pkg/volume/host_path/OWNERS @@ -8,7 +8,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/iscsi/OWNERS b/pkg/volume/iscsi/OWNERS index 242946cda63..0d7185f0b45 100644 --- a/pkg/volume/iscsi/OWNERS +++ b/pkg/volume/iscsi/OWNERS @@ -8,7 +8,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/iscsi/iscsi_util.go b/pkg/volume/iscsi/iscsi_util.go index 7190859b542..9b5d3cb7a59 100755 --- a/pkg/volume/iscsi/iscsi_util.go +++ b/pkg/volume/iscsi/iscsi_util.go @@ -114,7 +114,7 @@ func waitForPathToExistInternal(devicePath *string, maxRetries int, deviceTransp if err == nil { return true } - if err != nil && !os.IsNotExist(err) { + if !os.IsNotExist(err) { return false } if i == maxRetries-1 { diff --git a/pkg/volume/local/local.go b/pkg/volume/local/local.go index b09451e6f17..25f266d936b 100644 --- a/pkg/volume/local/local.go +++ b/pkg/volume/local/local.go @@ -198,7 +198,7 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return fmt.Errorf("invalid path: %s %v", m.globalPath, err) } - notMnt, err := m.mounter.IsLikelyNotMountPoint(dir) + notMnt, err := m.mounter.IsNotMountPoint(dir) 
glog.V(4).Infof("LocalVolume mount setup: PodDir(%s) VolDir(%s) Mounted(%t) Error(%v), ReadOnly(%t)", dir, m.globalPath, !notMnt, err, m.readOnly) if err != nil && !os.IsNotExist(err) { glog.Errorf("cannot validate mount point: %s %v", dir, err) @@ -223,9 +223,9 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { err = m.mounter.Mount(m.globalPath, dir, "", options) if err != nil { glog.Errorf("Mount of volume %s failed: %v", dir, err) - notMnt, mntErr := m.mounter.IsLikelyNotMountPoint(dir) + notMnt, mntErr := m.mounter.IsNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + glog.Errorf("IsNotMountPoint check failed: %v", mntErr) return err } if !notMnt { @@ -233,9 +233,9 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { glog.Errorf("Failed to unmount: %v", mntErr) return err } - notMnt, mntErr = m.mounter.IsLikelyNotMountPoint(dir) + notMnt, mntErr = m.mounter.IsNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + glog.Errorf("IsNotMountPoint check failed: %v", mntErr) return err } if !notMnt { @@ -269,5 +269,5 @@ func (u *localVolumeUnmounter) TearDown() error { // TearDownAt unmounts the bind mount func (u *localVolumeUnmounter) TearDownAt(dir string) error { glog.V(4).Infof("Unmounting volume %q at path %q\n", u.volName, dir) - return util.UnmountPath(dir, u.mounter) + return util.UnmountMountPoint(dir, u.mounter, true) /* extensiveMountPointCheck = true */ } diff --git a/pkg/volume/nfs/OWNERS b/pkg/volume/nfs/OWNERS index ae747e33483..e5cd1fb42df 100644 --- a/pkg/volume/nfs/OWNERS +++ b/pkg/volume/nfs/OWNERS @@ -9,7 +9,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/plugins.go b/pkg/volume/plugins.go index 099e2ad9954..34970214588 100644 --- a/pkg/volume/plugins.go +++ b/pkg/volume/plugins.go @@ -362,7 +362,7 @@ func (pm 
*VolumePluginMgr) InitPlugins(plugins []VolumePlugin, host VolumeHost) } err := plugin.Init(host) if err != nil { - glog.Errorf("Failed to load volume plugin %s, error: %s", plugin, err.Error()) + glog.Errorf("Failed to load volume plugin %s, error: %s", name, err.Error()) allErrs = append(allErrs, err) continue } diff --git a/pkg/volume/portworx/portworx.go b/pkg/volume/portworx/portworx.go index 6c4c109668a..ec971a7e370 100644 --- a/pkg/volume/portworx/portworx.go +++ b/pkg/volume/portworx/portworx.go @@ -266,7 +266,7 @@ func (b *portworxVolumeMounter) SetUp(fsGroup *int64) error { // SetUpAt attaches the disk and bind mounts to the volume path. func (b *portworxVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("Portworx Volume set up: %s %v %v", dir, !notMnt, err) + glog.Infof("Portworx Volume set up. Dir: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { glog.Errorf("Cannot validate mountpoint: %s", dir) return err @@ -291,7 +291,7 @@ func (b *portworxVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { if !b.readOnly { volume.SetVolumeOwnership(b, fsGroup) } - glog.V(4).Infof("Portworx Volume %s mounted to %s", b.volumeID, dir) + glog.Infof("Portworx Volume %s setup at %s", b.volumeID, dir) return nil } @@ -314,8 +314,8 @@ func (c *portworxVolumeUnmounter) TearDown() error { // Unmounts the bind mount, and detaches the disk only if the PD // resource was the last reference to that disk on the kubelet. func (c *portworxVolumeUnmounter) TearDownAt(dir string) error { - glog.V(4).Infof("Portworx Volume TearDown of %s", dir) - // Call Portworx Unmount for Portworx's book-keeping. 
+ glog.Infof("Portworx Volume TearDown of %s", dir) + if err := c.manager.UnmountVolume(c, dir); err != nil { return err } diff --git a/pkg/volume/portworx/portworx_util.go b/pkg/volume/portworx/portworx_util.go index cd96ec2ba47..1a7c387eaf8 100644 --- a/pkg/volume/portworx/portworx_util.go +++ b/pkg/volume/portworx/portworx_util.go @@ -43,12 +43,14 @@ type PortworxVolumeUtil struct { // CreateVolume creates a Portworx volume. func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (string, int, map[string]string, error) { - driver, err := util.getPortworxDriver(p.plugin.host) + driver, err := util.getPortworxDriver(p.plugin.host, false /*localOnly*/) if err != nil || driver == nil { glog.Errorf("Failed to get portworx driver. Err: %v", err) return "", 0, nil, err } + glog.Infof("Creating Portworx volume for PVC: %v", p.options.PVC.Name) + capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] // Portworx Volumes are specified in GB requestGB := int(volume.RoundUpSize(capacity.Value(), 1024*1024*1024)) @@ -56,6 +58,7 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri specHandler := osdspec.NewSpecHandler() spec, err := specHandler.SpecFromOpts(p.options.Parameters) if err != nil { + glog.Errorf("Error parsing parameters for PVC: %v. 
Err: %v", p.options.PVC.Name, err) return "", 0, nil, err } spec.Size = uint64(requestGB * 1024 * 1024 * 1024) @@ -68,14 +71,16 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri locator.VolumeLabels[pvcClaimLabel] = p.options.PVC.Name volumeID, err := driver.Create(&locator, &source, spec) if err != nil { - glog.V(2).Infof("Error creating Portworx Volume : %v", err) + glog.Errorf("Error creating Portworx Volume : %v", err) } + + glog.Infof("Successfully created Portworx volume for PVC: %v", p.options.PVC.Name) return volumeID, requestGB, nil, err } // DeleteVolume deletes a Portworx volume func (util *PortworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error { - driver, err := util.getPortworxDriver(d.plugin.host) + driver, err := util.getPortworxDriver(d.plugin.host, false /*localOnly*/) if err != nil || driver == nil { glog.Errorf("Failed to get portworx driver. Err: %v", err) return err @@ -83,7 +88,7 @@ func (util *PortworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error { err = driver.Delete(d.volumeID) if err != nil { - glog.V(2).Infof("Error deleting Portworx Volume (%v): %v", d.volName, err) + glog.Errorf("Error deleting Portworx Volume (%v): %v", d.volName, err) return err } return nil @@ -91,7 +96,7 @@ func (util *PortworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error { // AttachVolume attaches a Portworx Volume func (util *PortworxVolumeUtil) AttachVolume(m *portworxVolumeMounter) (string, error) { - driver, err := util.getPortworxDriver(m.plugin.host) + driver, err := util.getPortworxDriver(m.plugin.host, true /*localOnly*/) if err != nil || driver == nil { glog.Errorf("Failed to get portworx driver. 
Err: %v", err) return "", err @@ -99,7 +104,7 @@ func (util *PortworxVolumeUtil) AttachVolume(m *portworxVolumeMounter) (string, devicePath, err := driver.Attach(m.volName) if err != nil { - glog.V(2).Infof("Error attaching Portworx Volume (%v): %v", m.volName, err) + glog.Errorf("Error attaching Portworx Volume (%v): %v", m.volName, err) return "", err } return devicePath, nil @@ -107,7 +112,7 @@ func (util *PortworxVolumeUtil) AttachVolume(m *portworxVolumeMounter) (string, // DetachVolume detaches a Portworx Volume func (util *PortworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error { - driver, err := util.getPortworxDriver(u.plugin.host) + driver, err := util.getPortworxDriver(u.plugin.host, true /*localOnly*/) if err != nil || driver == nil { glog.Errorf("Failed to get portworx driver. Err: %v", err) return err @@ -115,7 +120,7 @@ func (util *PortworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error { err = driver.Detach(u.volName) if err != nil { - glog.V(2).Infof("Error detaching Portworx Volume (%v): %v", u.volName, err) + glog.Errorf("Error detaching Portworx Volume (%v): %v", u.volName, err) return err } return nil @@ -123,7 +128,7 @@ func (util *PortworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error { // MountVolume mounts a Portworx Volume on the specified mountPath func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath string) error { - driver, err := util.getPortworxDriver(m.plugin.host) + driver, err := util.getPortworxDriver(m.plugin.host, true /*localOnly*/) if err != nil || driver == nil { glog.Errorf("Failed to get portworx driver. 
Err: %v", err) return err @@ -131,7 +136,7 @@ func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath err = driver.Mount(m.volName, mountPath) if err != nil { - glog.V(2).Infof("Error mounting Portworx Volume (%v) on Path (%v): %v", m.volName, mountPath, err) + glog.Errorf("Error mounting Portworx Volume (%v) on Path (%v): %v", m.volName, mountPath, err) return err } return nil @@ -139,7 +144,7 @@ func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath // UnmountVolume unmounts a Portworx Volume func (util *PortworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountPath string) error { - driver, err := util.getPortworxDriver(u.plugin.host) + driver, err := util.getPortworxDriver(u.plugin.host, true /*localOnly*/) if err != nil || driver == nil { glog.Errorf("Failed to get portworx driver. Err: %v", err) return err @@ -147,7 +152,7 @@ func (util *PortworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountP err = driver.Unmount(u.volName, mountPath) if err != nil { - glog.V(2).Infof("Error unmounting Portworx Volume (%v) on Path (%v): %v", u.volName, mountPath, err) + glog.Errorf("Error unmounting Portworx Volume (%v) on Path (%v): %v", u.volName, mountPath, err) return err } return nil @@ -181,13 +186,34 @@ func createDriverClient(hostname string) (*osdclient.Client, error) { } } -func (util *PortworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost) (volumeapi.VolumeDriver, error) { +// getPortworxDriver() returns a Portworx volume driver which can be used for volume operations +// localOnly: If true, the returned driver will be connected to Portworx API server on volume host. +// If false, driver will be connected to API server on volume host or Portworx k8s service cluster IP +// This flag is required to explicitly force certain operations (mount, unmount, detach, attach) to +// go to the volume host instead of the k8s service which might route it to any host. 
This pertains to how +// Portworx mounts and attaches a volume to the running container. The node getting these requests needs to +// see the pod container mounts (specifically /var/lib/kubelet/pods/) +// Operations like create and delete volume don't need to be restricted to local volume host since +// any node in the Portworx cluster can co-ordinate the create/delete request and forward the operations to +// the Portworx node that will own/owns the data. +func (util *PortworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost, localOnly bool) (volumeapi.VolumeDriver, error) { + var err error + if localOnly { + util.portworxClient, err = createDriverClient(volumeHost.GetHostName()) + if err != nil { + return nil, err + } else { + glog.V(4).Infof("Using portworx local service at: %v as api endpoint", volumeHost.GetHostName()) + return volumeclient.VolumeDriver(util.portworxClient), nil + } + } + + // check if existing saved client is valid if isValid, _ := isClientValid(util.portworxClient); isValid { return volumeclient.VolumeDriver(util.portworxClient), nil } // create new client - var err error util.portworxClient, err = createDriverClient(volumeHost.GetHostName()) // for backward compatibility if err != nil || util.portworxClient == nil { // Create client from portworx service @@ -215,7 +241,7 @@ func (util *PortworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost) return nil, err } - glog.Infof("Using portworx service at: %v as api endpoint", svc.Spec.ClusterIP) + glog.Infof("Using portworx cluster service at: %v as api endpoint", svc.Spec.ClusterIP) } else { glog.Infof("Using portworx service at: %v as api endpoint", volumeHost.GetHostName()) } diff --git a/pkg/volume/quobyte/OWNERS b/pkg/volume/quobyte/OWNERS index 025093af3df..38c978a79d6 100644 --- a/pkg/volume/quobyte/OWNERS +++ b/pkg/volume/quobyte/OWNERS @@ -8,7 +8,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git 
a/pkg/volume/quobyte/quobyte.go b/pkg/volume/quobyte/quobyte.go index 7cc98138916..5fae74de721 100644 --- a/pkg/volume/quobyte/quobyte.go +++ b/pkg/volume/quobyte/quobyte.go @@ -365,6 +365,7 @@ func (provisioner *quobyteVolumeProvisioner) Provision() (*v1.PersistentVolume, } provisioner.config = "BASE" provisioner.tenant = "DEFAULT" + createQuota := false cfg, err := parseAPIConfig(provisioner.plugin, provisioner.options.Parameters) if err != nil { @@ -382,6 +383,8 @@ func (provisioner *quobyteVolumeProvisioner) Provision() (*v1.PersistentVolume, provisioner.tenant = v case "quobyteconfig": provisioner.config = v + case "createquota": + createQuota = gostrings.ToLower(v) == "true" case "adminsecretname", "adminsecretnamespace", "quobyteapiserver": @@ -402,7 +405,7 @@ func (provisioner *quobyteVolumeProvisioner) Provision() (*v1.PersistentVolume, config: cfg, } - vol, sizeGB, err := manager.createVolume(provisioner) + vol, sizeGB, err := manager.createVolume(provisioner, createQuota) if err != nil { return nil, err } diff --git a/pkg/volume/quobyte/quobyte_util.go b/pkg/volume/quobyte/quobyte_util.go index cd32637c581..2b5db49fa23 100644 --- a/pkg/volume/quobyte/quobyte_util.go +++ b/pkg/volume/quobyte/quobyte_util.go @@ -32,11 +32,11 @@ type quobyteVolumeManager struct { config *quobyteAPIConfig } -func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProvisioner) (quobyte *v1.QuobyteVolumeSource, size int, err error) { +func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProvisioner, createQuota bool) (quobyte *v1.QuobyteVolumeSource, size int, err error) { capacity := provisioner.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] volumeSize := int(volume.RoundUpSize(capacity.Value(), 1024*1024*1024)) // Quobyte has the concept of Volumes which doen't have a specific size (they can grow unlimited) - // to simulate a size constraint we could set here a Quota + // to simulate a size constraint we 
set here a Quota for logical space volumeRequest := &quobyteapi.CreateVolumeRequest{ Name: provisioner.volume, RootUserID: provisioner.user, @@ -45,10 +45,20 @@ func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProv ConfigurationName: provisioner.config, } - if _, err := manager.createQuobyteClient().CreateVolume(volumeRequest); err != nil { + quobyteClient := manager.createQuobyteClient() + volumeUUID, err := quobyteClient.CreateVolume(volumeRequest) + if err != nil { return &v1.QuobyteVolumeSource{}, volumeSize, err } + // Set Quota for Volume with specified byte size + if createQuota { + err = quobyteClient.SetVolumeQuota(volumeUUID, uint64(capacity.Value())) + if err != nil { + return &v1.QuobyteVolumeSource{}, volumeSize, err + } + } + glog.V(4).Infof("Created Quobyte volume %s", provisioner.volume) return &v1.QuobyteVolumeSource{ Registry: provisioner.registry, diff --git a/pkg/volume/rbd/OWNERS b/pkg/volume/rbd/OWNERS index 51fdf5e5c0f..f818be5f646 100644 --- a/pkg/volume/rbd/OWNERS +++ b/pkg/volume/rbd/OWNERS @@ -7,7 +7,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/secret/OWNERS b/pkg/volume/secret/OWNERS index b8f17f6f38f..54ffe3c5748 100644 --- a/pkg/volume/secret/OWNERS +++ b/pkg/volume/secret/OWNERS @@ -12,7 +12,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/testing/OWNERS b/pkg/volume/testing/OWNERS index 30b3955dd65..f0e4b2d5614 100755 --- a/pkg/volume/testing/OWNERS +++ b/pkg/volume/testing/OWNERS @@ -10,7 +10,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/util.go b/pkg/volume/util.go index e0900cb1e8c..a674d37829c 100644 --- a/pkg/volume/util.go +++ b/pkg/volume/util.go @@ -180,7 +180,10 @@ func (c *realRecyclerClient) Event(eventtype, message string) { } func (c *realRecyclerClient) 
WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) { - podSelector, _ := fields.ParseSelector("metadata.name=" + name) + podSelector, err := fields.ParseSelector("metadata.name=" + name) + if err != nil { + return nil, err + } options := metav1.ListOptions{ FieldSelector: podSelector.String(), Watch: true, diff --git a/pkg/volume/util/atomic_writer.go b/pkg/volume/util/atomic_writer.go index 5eef55b4508..1d88d5ebfa4 100644 --- a/pkg/volume/util/atomic_writer.go +++ b/pkg/volume/util/atomic_writer.go @@ -311,11 +311,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection) (sets.St } relativePath := strings.TrimPrefix(path, w.targetDir) - if runtime.GOOS == "windows" { - relativePath = strings.TrimPrefix(relativePath, "\\") - } else { - relativePath = strings.TrimPrefix(relativePath, "/") - } + relativePath = strings.TrimPrefix(relativePath, string(os.PathSeparator)) if strings.HasPrefix(relativePath, "..") { return nil } @@ -339,7 +335,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection) (sets.St for subPath := file; subPath != ""; { newPaths.Insert(subPath) subPath, _ = filepath.Split(subPath) - subPath = strings.TrimSuffix(subPath, "/") + subPath = strings.TrimSuffix(subPath, string(os.PathSeparator)) } } glog.V(5).Infof("%s: new paths: %+v", w.targetDir, newPaths.List()) @@ -424,7 +420,7 @@ func (w *AtomicWriter) createUserVisibleFiles(payload map[string]FileProjection) // Since filepath.Split leaves a trailing path separator, in this // example, dir = "foo/". In order to calculate the number of // subdirectories, we must subtract 1 from the number returned by split. 
- subDirs = len(strings.Split(dir, "/")) - 1 + subDirs = len(strings.Split(dir, string(os.PathSeparator))) - 1 err := os.MkdirAll(path.Join(w.targetDir, dir), os.ModePerm) if err != nil { return err diff --git a/pkg/volume/util/util.go b/pkg/volume/util/util.go index 08f0776c27a..98e436c31af 100644 --- a/pkg/volume/util/util.go +++ b/pkg/volume/util/util.go @@ -75,6 +75,15 @@ func SetReady(dir string) { // UnmountPath is a common unmount routine that unmounts the given path and // deletes the remaining directory if successful. func UnmountPath(mountPath string, mounter mount.Interface) error { + return UnmountMountPoint(mountPath, mounter, false /* extensiveMountPointCheck */) +} + +// UnmountMountPoint is a common unmount routine that unmounts the given path and +// deletes the remaining directory if successful. +// if extensiveMountPointCheck is true +// IsNotMountPoint will be called instead of IsLikelyNotMountPoint. +// IsNotMountPoint is more expensive but properly handles bind mounts. 
+func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool) error { if pathExists, pathErr := PathExists(mountPath); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { @@ -82,16 +91,26 @@ func UnmountPath(mountPath string, mounter mount.Interface) error { return nil } - notMnt, err := mounter.IsLikelyNotMountPoint(mountPath) + var notMnt bool + var err error + + if extensiveMountPointCheck { + notMnt, err = mount.IsNotMountPoint(mounter, mountPath) + } else { + notMnt, err = mounter.IsLikelyNotMountPoint(mountPath) + } + if err != nil { return err } + if notMnt { glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath) return os.Remove(mountPath) } // Unmount the mount path + glog.V(4).Infof("%q is a mountpoint, unmounting", mountPath) if err := mounter.Unmount(mountPath); err != nil { return err } diff --git a/pkg/volume/vsphere_volume/OWNERS b/pkg/volume/vsphere_volume/OWNERS index f6808465ec8..23d64e4b0ef 100755 --- a/pkg/volume/vsphere_volume/OWNERS +++ b/pkg/volume/vsphere_volume/OWNERS @@ -12,7 +12,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/plugin/cmd/kube-scheduler/app/BUILD b/plugin/cmd/kube-scheduler/app/BUILD index 807faa64bcc..08802b9991c 100644 --- a/plugin/cmd/kube-scheduler/app/BUILD +++ b/plugin/cmd/kube-scheduler/app/BUILD @@ -22,8 +22,6 @@ go_library( "//pkg/client/informers/informers_generated/externalversions/apps/v1beta1:go_default_library", "//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library", "//pkg/client/informers/informers_generated/externalversions/extensions/v1beta1:go_default_library", - "//pkg/client/leaderelection:go_default_library", - "//pkg/client/leaderelection/resourcelock:go_default_library", "//pkg/controller:go_default_library", "//pkg/util/configz:go_default_library", 
"//plugin/cmd/kube-scheduler/app/options:go_default_library", @@ -40,9 +38,12 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", + "//vendor/k8s.io/client-go/tools/leaderelection:go_default_library", + "//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", ], ) diff --git a/plugin/cmd/kube-scheduler/app/configurator.go b/plugin/cmd/kube-scheduler/app/configurator.go index 7b0871cb30c..7acb0ba98b0 100644 --- a/plugin/cmd/kube-scheduler/app/configurator.go +++ b/plugin/cmd/kube-scheduler/app/configurator.go @@ -28,6 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" restclient "k8s.io/client-go/rest" @@ -54,10 +55,11 @@ func createRecorder(kubecli *clientset.Clientset, s *options.SchedulerServer) re return eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: s.SchedulerName}) } -func createClient(s *options.SchedulerServer) (*clientset.Clientset, error) { +// TODO: convert scheduler to only use client-go's clientset. 
+func createClient(s *options.SchedulerServer) (*clientset.Clientset, *kubernetes.Clientset, error) { kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig) if err != nil { - return nil, fmt.Errorf("unable to build config from flags: %v", err) + return nil, nil, fmt.Errorf("unable to build config from flags: %v", err) } kubeconfig.ContentType = s.ContentType @@ -67,9 +69,13 @@ func createClient(s *options.SchedulerServer) (*clientset.Clientset, error) { cli, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "leader-election")) if err != nil { - return nil, fmt.Errorf("invalid API configuration: %v", err) + return nil, nil, fmt.Errorf("invalid API configuration: %v", err) } - return cli, nil + clientgoCli, err := kubernetes.NewForConfig(restclient.AddUserAgent(kubeconfig, "leader-election")) + if err != nil { + return nil, nil, fmt.Errorf("invalid API configuration: %v", err) + } + return cli, clientgoCli, nil } // CreateScheduler encapsulates the entire creation of a runnable scheduler. 
diff --git a/plugin/cmd/kube-scheduler/app/options/BUILD b/plugin/cmd/kube-scheduler/app/options/BUILD index 2dd85f4b3ce..6aa7266cf21 100644 --- a/plugin/cmd/kube-scheduler/app/options/BUILD +++ b/plugin/cmd/kube-scheduler/app/options/BUILD @@ -16,7 +16,7 @@ go_library( "//pkg/apis/componentconfig:go_default_library", "//pkg/apis/componentconfig/install:go_default_library", "//pkg/apis/componentconfig/v1alpha1:go_default_library", - "//pkg/client/leaderelection:go_default_library", + "//pkg/client/leaderelectionconfig:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", "//plugin/pkg/scheduler/factory:go_default_library", diff --git a/plugin/cmd/kube-scheduler/app/options/options.go b/plugin/cmd/kube-scheduler/app/options/options.go index 9c00323e317..a1c30cc30b5 100644 --- a/plugin/cmd/kube-scheduler/app/options/options.go +++ b/plugin/cmd/kube-scheduler/app/options/options.go @@ -24,7 +24,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" - "k8s.io/kubernetes/pkg/client/leaderelection" + "k8s.io/kubernetes/pkg/client/leaderelectionconfig" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/plugin/pkg/scheduler/factory" @@ -91,6 +91,6 @@ func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) { fs.MarkDeprecated("hard-pod-affinity-symmetric-weight", "This option was moved to the policy configuration file") fs.StringVar(&s.FailureDomains, "failure-domains", kubeletapis.DefaultFailureDomains, "Indicate the \"all topologies\" set for an empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.") fs.MarkDeprecated("failure-domains", "Doesn't have any effect. 
Will be removed in future version.") - leaderelection.BindFlags(&s.LeaderElection, fs) + leaderelectionconfig.BindFlags(&s.LeaderElection, fs) utilfeature.DefaultFeatureGate.AddFlag(fs) } diff --git a/plugin/cmd/kube-scheduler/app/server.go b/plugin/cmd/kube-scheduler/app/server.go index 653e3398f49..8e369931fb1 100644 --- a/plugin/cmd/kube-scheduler/app/server.go +++ b/plugin/cmd/kube-scheduler/app/server.go @@ -28,9 +28,9 @@ import ( "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions" - "k8s.io/kubernetes/pkg/client/leaderelection" - "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/util/configz" "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options" @@ -65,7 +65,7 @@ through the API as necessary.`, // Run runs the specified SchedulerServer. This should never exit. 
func Run(s *options.SchedulerServer) error { - kubecli, err := createClient(s) + kubecli, clientgoCli, err := createClient(s) if err != nil { return fmt.Errorf("unable to create kube client: %v", err) } @@ -121,7 +121,7 @@ func Run(s *options.SchedulerServer) error { rl, err := resourcelock.New(s.LeaderElection.ResourceLock, s.LockObjectNamespace, s.LockObjectName, - kubecli, + clientgoCli, resourcelock.ResourceLockConfig{ Identity: id, EventRecorder: recorder, diff --git a/plugin/pkg/admission/noderestriction/OWNERS b/plugin/pkg/admission/noderestriction/OWNERS index e58cadf54d4..c44dfc9faa7 100644 --- a/plugin/pkg/admission/noderestriction/OWNERS +++ b/plugin/pkg/admission/noderestriction/OWNERS @@ -1,8 +1,8 @@ approvers: - deads2k - liggitt -- timstclair +- tallclair reviewers: - deads2k - liggitt -- timstclair +- tallclair diff --git a/plugin/pkg/admission/noderestriction/admission.go b/plugin/pkg/admission/noderestriction/admission.go index 1779cf57a38..ecf6863b338 100644 --- a/plugin/pkg/admission/noderestriction/admission.go +++ b/plugin/pkg/admission/noderestriction/admission.go @@ -92,7 +92,7 @@ func (c *nodePlugin) Admit(a admission.Attributes) error { if len(nodeName) == 0 { // disallow requests we cannot match to a particular node - return admission.NewForbidden(a, fmt.Errorf("could not determine node from user %s", a.GetUserInfo().GetName())) + return admission.NewForbidden(a, fmt.Errorf("could not determine node from user %q", a.GetUserInfo().GetName())) } switch a.GetResource().GroupResource() { @@ -103,7 +103,7 @@ func (c *nodePlugin) Admit(a admission.Attributes) error { case "status": return c.admitPodStatus(nodeName, a) default: - return admission.NewForbidden(a, fmt.Errorf("unexpected pod subresource %s", a.GetSubresource())) + return admission.NewForbidden(a, fmt.Errorf("unexpected pod subresource %q", a.GetSubresource())) } case nodeResource: @@ -125,31 +125,31 @@ func (c *nodePlugin) admitPod(nodeName string, a admission.Attributes) error { 
// only allow nodes to create mirror pods if _, isMirrorPod := pod.Annotations[api.MirrorPodAnnotationKey]; !isMirrorPod { - return admission.NewForbidden(a, fmt.Errorf("pod does not have %q annotation, node %s can only create mirror pods", api.MirrorPodAnnotationKey, nodeName)) + return admission.NewForbidden(a, fmt.Errorf("pod does not have %q annotation, node %q can only create mirror pods", api.MirrorPodAnnotationKey, nodeName)) } // only allow nodes to create a pod bound to itself if pod.Spec.NodeName != nodeName { - return admission.NewForbidden(a, fmt.Errorf("node %s can only create pods with spec.nodeName set to itself", nodeName)) + return admission.NewForbidden(a, fmt.Errorf("node %q can only create pods with spec.nodeName set to itself", nodeName)) } // don't allow a node to create a pod that references any other API objects if pod.Spec.ServiceAccountName != "" { - return admission.NewForbidden(a, fmt.Errorf("node %s can not create pods that reference a service account", nodeName)) + return admission.NewForbidden(a, fmt.Errorf("node %q can not create pods that reference a service account", nodeName)) } hasSecrets := false podutil.VisitPodSecretNames(pod, func(name string) (shouldContinue bool) { hasSecrets = true; return false }) if hasSecrets { - return admission.NewForbidden(a, fmt.Errorf("node %s can not create pods that reference secrets", nodeName)) + return admission.NewForbidden(a, fmt.Errorf("node %q can not create pods that reference secrets", nodeName)) } hasConfigMaps := false podutil.VisitPodConfigmapNames(pod, func(name string) (shouldContinue bool) { hasConfigMaps = true; return false }) if hasConfigMaps { - return admission.NewForbidden(a, fmt.Errorf("node %s can not create pods that reference configmaps", nodeName)) + return admission.NewForbidden(a, fmt.Errorf("node %q can not create pods that reference configmaps", nodeName)) } for _, v := range pod.Spec.Volumes { if v.PersistentVolumeClaim != nil { - return admission.NewForbidden(a, 
fmt.Errorf("node %s can not create pods that reference persistentvolumeclaims", nodeName)) + return admission.NewForbidden(a, fmt.Errorf("node %q can not create pods that reference persistentvolumeclaims", nodeName)) } } @@ -167,12 +167,12 @@ func (c *nodePlugin) admitPod(nodeName string, a admission.Attributes) error { } // only allow a node to delete a pod bound to itself if existingPod.Spec.NodeName != nodeName { - return admission.NewForbidden(a, fmt.Errorf("node %s can only delete pods with spec.nodeName set to itself", nodeName)) + return admission.NewForbidden(a, fmt.Errorf("node %q can only delete pods with spec.nodeName set to itself", nodeName)) } return nil default: - return admission.NewForbidden(a, fmt.Errorf("unexpected operation %s", a.GetOperation())) + return admission.NewForbidden(a, fmt.Errorf("unexpected operation %q", a.GetOperation())) } } @@ -186,12 +186,12 @@ func (c *nodePlugin) admitPodStatus(nodeName string, a admission.Attributes) err } // only allow a node to update status of a pod bound to itself if pod.Spec.NodeName != nodeName { - return admission.NewForbidden(a, fmt.Errorf("node %s can only update pod status for pods with spec.nodeName set to itself", nodeName)) + return admission.NewForbidden(a, fmt.Errorf("node %q can only update pod status for pods with spec.nodeName set to itself", nodeName)) } return nil default: - return admission.NewForbidden(a, fmt.Errorf("unexpected operation %s", a.GetOperation())) + return admission.NewForbidden(a, fmt.Errorf("unexpected operation %q", a.GetOperation())) } } @@ -208,7 +208,7 @@ func (c *nodePlugin) admitNode(nodeName string, a admission.Attributes) error { } if requestedName != nodeName { - return admission.NewForbidden(a, fmt.Errorf("node %s cannot modify node %s", nodeName, requestedName)) + return admission.NewForbidden(a, fmt.Errorf("node %q cannot modify node %q", nodeName, requestedName)) } return nil } diff --git a/plugin/pkg/auth/authorizer/node/OWNERS 
b/plugin/pkg/auth/authorizer/node/OWNERS index a62844dc284..312724201f9 100644 --- a/plugin/pkg/auth/authorizer/node/OWNERS +++ b/plugin/pkg/auth/authorizer/node/OWNERS @@ -1,9 +1,9 @@ approvers: -- timstclair +- tallclair - liggitt - deads2k reviewers: -- timstclair +- tallclair - liggitt - deads2k - ericchiang diff --git a/plugin/pkg/auth/authorizer/node/node_authorizer.go b/plugin/pkg/auth/authorizer/node/node_authorizer.go index 5e5d7362c65..46a24eba1e0 100644 --- a/plugin/pkg/auth/authorizer/node/node_authorizer.go +++ b/plugin/pkg/auth/authorizer/node/node_authorizer.go @@ -48,7 +48,7 @@ type NodeAuthorizer struct { nodeRules []rbacapi.PolicyRule } -// New returns a new node authorizer +// NewAuthorizer returns a new node authorizer func NewAuthorizer(graph *Graph, identifier nodeidentifier.NodeIdentifier, rules []rbacapi.PolicyRule) authorizer.Authorizer { return &NodeAuthorizer{ graph: graph, @@ -113,7 +113,7 @@ func (r *NodeAuthorizer) authorizeGet(nodeName string, startingType vertexType, return false, "no path found to object", nil } if !ok { - glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + glog.V(2).Infof("NODE DENY: %q %#v", nodeName, attrs) return false, "no path found to object", nil } return ok, "", nil @@ -126,12 +126,12 @@ func (r *NodeAuthorizer) hasPathFrom(nodeName string, startingType vertexType, s nodeVertex, exists := r.graph.getVertex_rlocked(nodeVertexType, "", nodeName) if !exists { - return false, fmt.Errorf("unknown node %s cannot get %s %s/%s", nodeName, vertexTypes[startingType], startingNamespace, startingName) + return false, fmt.Errorf("unknown node %q cannot get %s %s/%s", nodeName, vertexTypes[startingType], startingNamespace, startingName) } startingVertex, exists := r.graph.getVertex_rlocked(startingType, startingNamespace, startingName) if !exists { - return false, fmt.Errorf("node %s cannot get unknown %s %s/%s", nodeName, vertexTypes[startingType], startingNamespace, startingName) + return false, fmt.Errorf("node %q 
cannot get unknown %s %s/%s", nodeName, vertexTypes[startingType], startingNamespace, startingName) } found := false @@ -158,7 +158,7 @@ func (r *NodeAuthorizer) hasPathFrom(nodeName string, startingType vertexType, s return found }) if !found { - return false, fmt.Errorf("node %s cannot get %s %s/%s, no path was found", nodeName, vertexTypes[startingType], startingNamespace, startingName) + return false, fmt.Errorf("node %q cannot get %s %s/%s, no path was found", nodeName, vertexTypes[startingType], startingNamespace, startingName) } return true, nil } diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go index 1697ad680d9..e2693416b39 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go @@ -243,6 +243,7 @@ func ClusterRoles() []rbac.ClusterRole { ObjectMeta: metav1.ObjectMeta{Name: "system:heapster"}, Rules: []rbac.PolicyRule{ rbac.NewRule(Read...).Groups(legacyGroup).Resources("events", "pods", "nodes", "namespaces").RuleOrDie(), + rbac.NewRule(Read...).Groups(extensionsGroup).Resources("deployments").RuleOrDie(), }, }, { diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml index d5c47f0fa6d..939d68ac457 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml @@ -404,6 +404,14 @@ items: - get - list - watch + - apiGroups: + - extensions + resources: + - deployments + verbs: + - get + - list + - watch - apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: diff --git a/plugin/pkg/scheduler/BUILD b/plugin/pkg/scheduler/BUILD index b028b281c40..29a0fddc114 100644 --- a/plugin/pkg/scheduler/BUILD +++ b/plugin/pkg/scheduler/BUILD @@ -15,12 +15,12 @@ go_test( tags = 
["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/testapi:go_default_library", "//plugin/pkg/scheduler/algorithm:go_default_library", "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", "//plugin/pkg/scheduler/core:go_default_library", "//plugin/pkg/scheduler/schedulercache:go_default_library", "//plugin/pkg/scheduler/testing:go_default_library", + "//plugin/pkg/scheduler/util:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/plugin/pkg/scheduler/OWNERS b/plugin/pkg/scheduler/OWNERS index 7b17cd0f2ee..d2792efd526 100644 --- a/plugin/pkg/scheduler/OWNERS +++ b/plugin/pkg/scheduler/OWNERS @@ -1,12 +1,4 @@ approvers: -- davidopp -- timothysc -- wojtek-t -- k82cn +- sig-scheduling-maintainers reviewers: -- davidopp -- bsalamat -- timothysc -- wojtek-t -- k82cn -- jayunit100 +- sig-scheduling diff --git a/plugin/pkg/scheduler/algorithm/predicates/metadata.go b/plugin/pkg/scheduler/algorithm/predicates/metadata.go index 78af322ef5c..269345a354c 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/metadata.go +++ b/plugin/pkg/scheduler/algorithm/predicates/metadata.go @@ -53,7 +53,7 @@ func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInf matchingAntiAffinityTerms: matchingTerms, } for predicateName, precomputeFunc := range predicatePrecomputations { - glog.V(10).Info("Precompute: %v", predicateName) + glog.V(10).Infof("Precompute: %v", predicateName) precomputeFunc(predicateMetadata) } return predicateMetadata diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go index dbb214c943d..518ec17e678 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -44,7 +44,7 @@ import ( 
"k8s.io/metrics/pkg/client/clientset_generated/clientset" ) -// predicatePrecomputations: Helper types/variables... +// PredicateMetadataModifier: Helper types/variables... type PredicateMetadataModifier func(pm *predicateMetadata) var predicatePrecomputeRegisterLock sync.Mutex @@ -56,7 +56,7 @@ func RegisterPredicatePrecomputation(predicateName string, precomp PredicateMeta predicatePrecomputations[predicateName] = precomp } -// Other types for predicate functions... +// NodeInfo: Other types for predicate functions... type NodeInfo interface { GetNodeInfo(nodeID string) (*v1.Node, error) } @@ -377,7 +377,7 @@ type VolumeZoneChecker struct { pvcInfo PersistentVolumeClaimInfo } -// VolumeZonePredicate evaluates if a pod can fit due to the volumes it requests, given +// NewVolumeZonePredicate evaluates if a pod can fit due to the volumes it requests, given // that some volumes may have zone scheduling constraints. The requirement is that any // volume zone-labels must match the equivalent zone-labels on the node. It is OK for // the node to have more zone-label constraints (for example, a hypothetical replicated @@ -474,10 +474,10 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *s return true, nil, nil } -// Returns a *schedulercache.Resource that covers the largest width in each -// resource dimension. Because init-containers run sequentially, we collect the -// max in each dimension iteratively. In contrast, we sum the resource vectors -// for regular containers since they run simultaneously. +// GetResourceRequest returns a *schedulercache.Resource that covers the largest +// width in each resource dimension. Because init-containers run sequentially, we collect +// the max in each dimension iteratively. In contrast, we sum the resource vectors for +// regular containers since they run simultaneously. 
// // Example: // @@ -499,30 +499,15 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *s // // Result: CPU: 3, Memory: 3G func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource { - result := schedulercache.Resource{} + result := &schedulercache.Resource{} for _, container := range pod.Spec.Containers { - for rName, rQuantity := range container.Resources.Requests { - switch rName { - case v1.ResourceMemory: - result.Memory += rQuantity.Value() - case v1.ResourceCPU: - result.MilliCPU += rQuantity.MilliValue() - case v1.ResourceNvidiaGPU: - result.NvidiaGPU += rQuantity.Value() - case v1.ResourceStorageOverlay: - result.StorageOverlay += rQuantity.Value() - default: - if v1helper.IsOpaqueIntResourceName(rName) { - result.AddOpaque(rName, rQuantity.Value()) - } - } - } + result.Add(container.Resources.Requests) } + // Account for storage requested by emptydir volumes // If the storage medium is memory, should exclude the size for _, vol := range pod.Spec.Volumes { if vol.EmptyDir != nil && vol.EmptyDir.Medium != v1.StorageMediumMemory { - result.StorageScratch += vol.EmptyDir.SizeLimit.Value() } } @@ -557,7 +542,8 @@ func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource { } } } - return &result + + return result } func podName(pod *v1.Pod) string { @@ -893,11 +879,17 @@ func PodFitsHostPorts(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.No // search two arrays and return true if they have at least one common element; return false otherwise func haveSame(a1, a2 []string) bool { - for _, val1 := range a1 { - for _, val2 := range a2 { - if val1 == val2 { - return true - } + m := map[string]int{} + + for _, val := range a1 { + m[val] = 1 + } + for _, val := range a2 { + m[val] = m[val] + 1 + } + for _, val := range m { + if val > 1 { + return true } } return false @@ -1247,15 +1239,26 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, node // PodToleratesNodeTaints checks if a pod 
tolertaions can tolerate the node taints func PodToleratesNodeTaints(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { + return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool { + // PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints. + return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute + }) +} + +// PodToleratesNodeNoExecuteTaints checks if a pod tolertaions can tolerate the node's NoExecute taints +func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { + return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool { + return t.Effect == v1.TaintEffectNoExecute + }) +} + +func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo, filter func(t *v1.Taint) bool) (bool, []algorithm.PredicateFailureReason, error) { taints, err := nodeInfo.Taints() if err != nil { return false, nil, err } - if v1helper.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taints, func(t *v1.Taint) bool { - // PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints. - return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute - }) { + if v1helper.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taints, filter) { return true, nil, nil } return false, []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil @@ -1304,7 +1307,7 @@ type VolumeNodeChecker struct { client clientset.Interface } -// VolumeNodeChecker evaluates if a pod can fit due to the volumes it requests, given +// NewVolumeNodePredicate evaluates if a pod can fit due to the volumes it requests, given // that some volumes have node topology constraints, particularly when using Local PVs. 
// The requirement is that any pod that uses a PVC that is bound to a PV with topology constraints // must be scheduled to a node that satisfies the PV's topology labels. diff --git a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go b/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go index e662f667860..5739bedf85a 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go +++ b/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go @@ -71,7 +71,7 @@ func calculateBalancedResourceAllocation(pod *v1.Pod, podRequests *schedulercach // 0-10 with 0 representing well balanced allocation and 10 poorly balanced. Subtracting it from // 10 leads to the score which also scales from 0 to 10 while 10 representing well balanced. diff := math.Abs(cpuFraction - memoryFraction) - score = int(10 - diff*10) + score = int((1 - diff) * float64(schedulerapi.MaxPriority)) } if glog.V(10) { // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is @@ -98,8 +98,8 @@ func fractionOfCapacity(requested, capacity int64) float64 { return float64(requested) / float64(capacity) } -// BalancedResourceAllocation favors nodes with balanced resource usage rate. -// BalancedResourceAllocation should **NOT** be used alone, and **MUST** be used together with LeastRequestedPriority. +// BalancedResourceAllocationMap favors nodes with balanced resource usage rate. +// BalancedResourceAllocationMap should **NOT** be used alone, and **MUST** be used together with LeastRequestedPriority. // It calculates the difference between the cpu and memory fracion of capacity, and prioritizes the host based on how // close the two metrics are to each other. // Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. 
The algorithm is partly inspired by: diff --git a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go b/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go index b1a2f7529ea..09a4afd9bf5 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go @@ -110,7 +110,7 @@ func TestBalancedResourceAllocation(t *testing.T) { */ pod: &v1.Pod{Spec: noResources}, nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, test: "nothing scheduled, nothing requested", }, { @@ -127,7 +127,7 @@ func TestBalancedResourceAllocation(t *testing.T) { */ pod: &v1.Pod{Spec: cpuAndMemory}, nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)}, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, test: "nothing scheduled, resources requested, differently sized machines", }, { @@ -144,7 +144,7 @@ func TestBalancedResourceAllocation(t *testing.T) { */ pod: &v1.Pod{Spec: noResources}, nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, test: "no resources requested, pods scheduled", pods: []*v1.Pod{ {Spec: machine1Spec, ObjectMeta: 
metav1.ObjectMeta{Labels: labels2}}, diff --git a/plugin/pkg/scheduler/algorithm/priorities/image_locality.go b/plugin/pkg/scheduler/algorithm/priorities/image_locality.go index e72a52b9cac..67dba1b1af2 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/image_locality.go +++ b/plugin/pkg/scheduler/algorithm/priorities/image_locality.go @@ -24,7 +24,7 @@ import ( "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) -// ImageLocalityPriority is a priority function that favors nodes that already have requested pod container's images. +// ImageLocalityPriorityMap is a priority function that favors nodes that already have requested pod container's images. // It will detect whether the requested images are present on a node, and then calculate a score ranging from 0 to 10 // based on the total size of those images. // - If none of the images are present, this node will be given the lowest priority. @@ -57,9 +57,9 @@ func calculateScoreFromSize(sumSize int64) int { score = 0 // If existing images' total size is larger than max, just make it highest priority. 
case sumSize >= maxImgSize: - score = 10 + score = schedulerapi.MaxPriority default: - score = int((10 * (sumSize - minImgSize) / (maxImgSize - minImgSize)) + 1) + score = int((int64(schedulerapi.MaxPriority) * (sumSize - minImgSize) / (maxImgSize - minImgSize)) + 1) } // Return which bucket the given size belongs to return score diff --git a/plugin/pkg/scheduler/algorithm/priorities/image_locality_test.go b/plugin/pkg/scheduler/algorithm/priorities/image_locality_test.go index 2cae0d20a88..c2e0feabbdc 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/image_locality_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/image_locality_test.go @@ -154,7 +154,7 @@ func TestImageLocalityPriority(t *testing.T) { // Score: 10 < min score = 0 pod: &v1.Pod{Spec: test_min_max}, nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)}, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}}, test: "if exceed limit, use limit", }, } diff --git a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go index 8a7d5d140b3..489a530cbf4 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go +++ b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go @@ -111,7 +111,7 @@ func (p *podAffinityPriorityMap) processTerms(terms []v1.WeightedPodAffinityTerm } } -// compute a sum by iterating through the elements of weightedPodAffinityTerm and adding +// CalculateInterPodAffinityPriority compute a sum by iterating through the elements of weightedPodAffinityTerm and adding // "weight" to the sum if the corresponding PodAffinityTerm is satisfied for // that node; the node(s) with the highest sum are the most preferred. 
// Symmetry need to be considered for preferredDuringSchedulingIgnoredDuringExecution from podAffinity & podAntiAffinity, @@ -224,7 +224,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node for _, node := range nodes { fScore := float64(0) if (maxCount - minCount) > 0 { - fScore = 10 * ((pm.counts[node.Name] - minCount) / (maxCount - minCount)) + fScore = float64(schedulerapi.MaxPriority) * ((pm.counts[node.Name] - minCount) / (maxCount - minCount)) } result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)}) if glog.V(10) { diff --git a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go index e7e55829baf..7ccd963bcf8 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go @@ -294,7 +294,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, test: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" + "which doesn't match either pods in nodes or in topology key", }, @@ -312,7 +312,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}}, + expectedList: 
[]schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}}, test: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score", }, // there are 2 regions, say regionChina(machine1,machine3,machine4) and regionIndia(machine2,machine5), both regions have nodes that match the preference. @@ -336,7 +336,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 5}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 5}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 5}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: schedulerapi.MaxPriority}, {Host: "machine5", Score: 5}}, test: "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score", }, // Test with the different operators and values for pod affinity scheduling preference, including some match failures. 
@@ -352,7 +352,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}}, test: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ", }, // Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods, @@ -368,7 +368,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}}, test: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry", }, { @@ -382,7 +382,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}}, test: "Affinity symmetry: considred 
RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry", }, @@ -402,7 +402,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, test: "Anti Affinity: pod that doesnot match existing pods in node will get high score ", }, { @@ -415,7 +415,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, test: "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ", }, { @@ -429,7 +429,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, test: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score", }, // Test the symmetry cases for anti affinity @@ -443,7 +443,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, 
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz2}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, test: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score", }, // Test both affinity and anti-affinity @@ -457,7 +457,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}}, test: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity", }, // Combined cases considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels (they are in the same RC/service), @@ -482,7 +482,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 4}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 4}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 4}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: schedulerapi.MaxPriority}, {Host: "machine5", Score: 4}}, test: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and 
existing pods have the same labels", }, // Consider Affinity, Anti Affinity and symmetry together. @@ -504,7 +504,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelAzAz2}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 0}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: 0}}, test: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry", }, } @@ -577,7 +577,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}}, test: "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score", }, { diff --git a/plugin/pkg/scheduler/algorithm/priorities/least_requested.go b/plugin/pkg/scheduler/algorithm/priorities/least_requested.go index 553e89f1efa..74306451638 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/least_requested.go +++ b/plugin/pkg/scheduler/algorithm/priorities/least_requested.go @@ -53,7 +53,7 @@ func calculateUnusedScore(requested int64, capacity int64, node string) int64 { requested, capacity, node) 
return 0 } - return ((capacity - requested) * 10) / capacity + return ((capacity - requested) * int64(schedulerapi.MaxPriority)) / capacity } // Calculates host priority based on the amount of unused resources. diff --git a/plugin/pkg/scheduler/algorithm/priorities/least_requested_test.go b/plugin/pkg/scheduler/algorithm/priorities/least_requested_test.go index 9b896b2fa2f..05afb586a1e 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/least_requested_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/least_requested_test.go @@ -110,7 +110,7 @@ func TestLeastRequested(t *testing.T) { */ pod: &v1.Pod{Spec: noResources}, nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, test: "nothing scheduled, nothing requested", }, { @@ -144,7 +144,7 @@ func TestLeastRequested(t *testing.T) { */ pod: &v1.Pod{Spec: noResources}, nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, test: "no resources requested, pods scheduled", pods: []*v1.Pod{ {Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, diff --git a/plugin/pkg/scheduler/algorithm/priorities/most_requested.go b/plugin/pkg/scheduler/algorithm/priorities/most_requested.go index 320399e3133..4245d4938ba 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/most_requested.go +++ b/plugin/pkg/scheduler/algorithm/priorities/most_requested.go @@ -57,7 +57,7 @@ func calculateUsedScore(requested int64, capacity int64, node 
string) int64 { requested, capacity, node) return 0 } - return (requested * 10) / capacity + return (requested * schedulerapi.MaxPriority) / capacity } // Calculate the resource used on a node. 'node' has information about the resources on the node. diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go b/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go index a0dd7f09ef8..b8a8077f349 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go +++ b/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go @@ -87,7 +87,7 @@ func CalculateNodeAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeName var fScore float64 for i := range result { if maxCount > 0 { - fScore = 10 * (float64(result[i].Score) / maxCountFloat) + fScore = float64(schedulerapi.MaxPriority) * (float64(result[i].Score) / maxCountFloat) } else { fScore = 0 } diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_affinity_test.go b/plugin/pkg/scheduler/algorithm/priorities/node_affinity_test.go index ff30832b1f2..9d425661a92 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_affinity_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/node_affinity_test.go @@ -146,7 +146,7 @@ func TestNodeAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, test: "only machine1 matches the preferred scheduling requirements of pod", }, { @@ -160,7 +160,7 @@ func TestNodeAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: label5}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}}, }, - expectedList: 
[]schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine5", Score: 10}, {Host: "machine2", Score: 3}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine5", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 3}}, test: "all machines matches the preferred scheduling requirements of pod but with different priorities ", }, } diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_label.go b/plugin/pkg/scheduler/algorithm/priorities/node_label.go index 092efa1ed69..7eef5a3bd76 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_label.go +++ b/plugin/pkg/scheduler/algorithm/priorities/node_label.go @@ -51,7 +51,7 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta i exists := labels.Set(node.Labels).Has(n.label) score := 0 if (exists && n.presence) || (!exists && !n.presence) { - score = 10 + score = schedulerapi.MaxPriority } return schedulerapi.HostPriority{ Host: node.Name, diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_label_test.go b/plugin/pkg/scheduler/algorithm/priorities/node_label_test.go index 3bc039c12ff..fbced34e336 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_label_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/node_label_test.go @@ -55,7 +55,7 @@ func TestNewNodeLabelPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}}, label: "baz", presence: false, test: "no match found, presence false", @@ -66,7 +66,7 @@ func TestNewNodeLabelPriority(t *testing.T) { {ObjectMeta: 
metav1.ObjectMeta{Name: "machine2", Labels: label2}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, label: "foo", presence: true, test: "one match found, presence true", @@ -77,7 +77,7 @@ func TestNewNodeLabelPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}}, label: "foo", presence: false, test: "one match found, presence false", @@ -88,7 +88,7 @@ func TestNewNodeLabelPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}}, label: "bar", presence: true, test: "two matches found, presence true", @@ -99,7 +99,7 @@ func TestNewNodeLabelPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}}, {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}}, }, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", 
Score: 0}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, label: "bar", presence: false, test: "two matches found, presence false", diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go b/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go index b0b0f3dbfc8..985e323f466 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go +++ b/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go @@ -41,13 +41,13 @@ func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, node } } if controllerRef == nil { - return schedulerapi.HostPriority{Host: node.Name, Score: 10}, nil + return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil } avoids, err := v1helper.GetAvoidPodsFromNodeAnnotations(node.Annotations) if err != nil { // If we cannot get annotation, assume it's schedulable there. 
- return schedulerapi.HostPriority{Host: node.Name, Score: 10}, nil + return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil } for i := range avoids.PreferAvoidPods { avoid := &avoids.PreferAvoidPods[i] @@ -55,5 +55,5 @@ func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, node return schedulerapi.HostPriority{Host: node.Name, Score: 0}, nil } } - return schedulerapi.HostPriority{Host: node.Name, Score: 10}, nil + return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil } diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go b/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go index 05b1a0823cb..a18ddcc03d6 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go @@ -96,7 +96,7 @@ func TestNodePreferAvoidPriority(t *testing.T) { }, }, nodes: testNodes, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}}, test: "pod managed by ReplicationController should avoid a node, this node get lowest priority score", }, { @@ -109,7 +109,7 @@ func TestNodePreferAvoidPriority(t *testing.T) { }, }, nodes: testNodes, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}}, test: "ownership by random controller should be ignored", }, { @@ -122,7 +122,7 @@ func TestNodePreferAvoidPriority(t 
*testing.T) { }, }, nodes: testNodes, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}}, test: "owner without Controller field set should be ignored", }, { @@ -135,7 +135,7 @@ func TestNodePreferAvoidPriority(t *testing.T) { }, }, nodes: testNodes, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: schedulerapi.MaxPriority}}, test: "pod managed by ReplicaSet should avoid a node, this node get lowest priority score", }, } diff --git a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go index 1888b651ddd..a706a97eb48 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go +++ b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go @@ -30,13 +30,9 @@ import ( "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) -// The maximum priority value to give to a node -// Priority values range from 0-maxPriority -const maxPriority float32 = 10 - // When zone information is present, give 2/3 of the weighting to zone spreading, 1/3 to node spreading // TODO: Any way to justify this weighting? 
-const zoneWeighting = 2.0 / 3.0 +const zoneWeighting float64 = 2.0 / 3.0 type SelectorSpread struct { serviceLister algorithm.ServiceLister @@ -103,15 +99,15 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *v1.Pod, nodeNameToInfo map selectors := s.getSelectors(pod) // Count similar pods by node - countsByNodeName := make(map[string]float32, len(nodes)) - countsByZone := make(map[string]float32, 10) - maxCountByNodeName := float32(0) + countsByNodeName := make(map[string]float64, len(nodes)) + countsByZone := make(map[string]float64, 10) + maxCountByNodeName := float64(0) countsByNodeNameLock := sync.Mutex{} if len(selectors) > 0 { processNodeFunc := func(i int) { nodeName := nodes[i].Name - count := float32(0) + count := float64(0) for _, nodePod := range nodeNameToInfo[nodeName].Pods() { if pod.Namespace != nodePod.Namespace { continue @@ -153,7 +149,7 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *v1.Pod, nodeNameToInfo map // Aggregate by-zone information // Compute the maximum number of pods hosted in any zone haveZones := len(countsByZone) != 0 - maxCountByZone := float32(0) + maxCountByZone := float64(0) for _, count := range countsByZone { if count > maxCountByZone { maxCountByZone = count @@ -165,16 +161,16 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *v1.Pod, nodeNameToInfo map // 0 being the lowest priority and maxPriority being the highest for _, node := range nodes { // initializing to the default/max node score of maxPriority - fScore := maxPriority + fScore := float64(schedulerapi.MaxPriority) if maxCountByNodeName > 0 { - fScore = maxPriority * ((maxCountByNodeName - countsByNodeName[node.Name]) / maxCountByNodeName) + fScore = float64(schedulerapi.MaxPriority) * ((maxCountByNodeName - countsByNodeName[node.Name]) / maxCountByNodeName) } // If there is zone information present, incorporate it if haveZones { zoneId := utilnode.GetZoneKey(node) if zoneId != "" { - zoneScore := maxPriority * ((maxCountByZone - 
countsByZone[zoneId]) / maxCountByZone) + zoneScore := float64(schedulerapi.MaxPriority) * ((maxCountByZone - countsByZone[zoneId]) / maxCountByZone) fScore = (fScore * (1.0 - zoneWeighting)) + (zoneWeighting * zoneScore) } } @@ -258,9 +254,9 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *v1.Pod, nodeNam // 0 being the lowest priority and maxPriority being the highest for node := range labeledNodes { // initializing to the default/max node score of maxPriority - fScore := float32(maxPriority) + fScore := float64(schedulerapi.MaxPriority) if numServicePods > 0 { - fScore = maxPriority * (float32(numServicePods-podCounts[labeledNodes[node]]) / float32(numServicePods)) + fScore = float64(schedulerapi.MaxPriority) * (float64(numServicePods-podCounts[labeledNodes[node]]) / float64(numServicePods)) } result = append(result, schedulerapi.HostPriority{Host: node, Score: int(fScore)}) } diff --git a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go index 76045c1eaec..9cef5403a54 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go @@ -69,14 +69,14 @@ func TestSelectorSpreadPriority(t *testing.T) { { pod: new(v1.Pod), nodes: []string{"machine1", "machine2"}, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, test: "nothing scheduled", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, pods: []*v1.Pod{{Spec: zone1Spec}}, nodes: []string{"machine1", "machine2"}, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 
schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, test: "no services", }, { @@ -84,7 +84,7 @@ func TestSelectorSpreadPriority(t *testing.T) { pods: []*v1.Pod{{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}}, nodes: []string{"machine1", "machine2"}, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}}, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, test: "different services", }, { @@ -95,7 +95,7 @@ func TestSelectorSpreadPriority(t *testing.T) { }, nodes: []string{"machine1", "machine2"}, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}}, test: "two pods, one service pod", }, { @@ -109,7 +109,7 @@ func TestSelectorSpreadPriority(t *testing.T) { }, nodes: []string{"machine1", "machine2"}, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}}, test: "five pods, one service pod in no namespace", }, { @@ -122,7 +122,7 @@ func TestSelectorSpreadPriority(t *testing.T) { }, nodes: []string{"machine1", "machine2"}, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}}, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}}, + expectedList: 
[]schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}}, test: "four pods, one service pod in default namespace", }, { @@ -136,7 +136,7 @@ func TestSelectorSpreadPriority(t *testing.T) { }, nodes: []string{"machine1", "machine2"}, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"}}}, - expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}}, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}}, test: "five pods, one service pod in specific namespace", }, { @@ -409,12 +409,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { { pod: new(v1.Pod), expectedList: []schedulerapi.HostPriority{ - {Host: nodeMachine1Zone1, Score: 10}, - {Host: nodeMachine1Zone2, Score: 10}, - {Host: nodeMachine2Zone2, Score: 10}, - {Host: nodeMachine1Zone3, Score: 10}, - {Host: nodeMachine2Zone3, Score: 10}, - {Host: nodeMachine3Zone3, Score: 10}, + {Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority}, + {Host: nodeMachine1Zone2, Score: schedulerapi.MaxPriority}, + {Host: nodeMachine2Zone2, Score: schedulerapi.MaxPriority}, + {Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority}, + {Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority}, + {Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority}, }, test: "nothing scheduled", }, @@ -422,12 +422,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { pod: buildPod("", labels1, nil), pods: []*v1.Pod{buildPod(nodeMachine1Zone1, nil, nil)}, expectedList: []schedulerapi.HostPriority{ - {Host: nodeMachine1Zone1, Score: 10}, - {Host: nodeMachine1Zone2, Score: 10}, - {Host: nodeMachine2Zone2, Score: 10}, - {Host: nodeMachine1Zone3, Score: 10}, - {Host: nodeMachine2Zone3, Score: 10}, - {Host: nodeMachine3Zone3, Score: 10}, + {Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority}, + {Host: 
nodeMachine1Zone2, Score: schedulerapi.MaxPriority}, + {Host: nodeMachine2Zone2, Score: schedulerapi.MaxPriority}, + {Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority}, + {Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority}, + {Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority}, }, test: "no services", }, @@ -436,12 +436,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { pods: []*v1.Pod{buildPod(nodeMachine1Zone1, labels2, nil)}, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}}, expectedList: []schedulerapi.HostPriority{ - {Host: nodeMachine1Zone1, Score: 10}, - {Host: nodeMachine1Zone2, Score: 10}, - {Host: nodeMachine2Zone2, Score: 10}, - {Host: nodeMachine1Zone3, Score: 10}, - {Host: nodeMachine2Zone3, Score: 10}, - {Host: nodeMachine3Zone3, Score: 10}, + {Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority}, + {Host: nodeMachine1Zone2, Score: schedulerapi.MaxPriority}, + {Host: nodeMachine2Zone2, Score: schedulerapi.MaxPriority}, + {Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority}, + {Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority}, + {Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority}, }, test: "different services", }, @@ -453,12 +453,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { }, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{ - {Host: nodeMachine1Zone1, Score: 10}, + {Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority}, {Host: nodeMachine1Zone2, Score: 0}, // Already have pod on machine {Host: nodeMachine2Zone2, Score: 3}, // Already have pod in zone - {Host: nodeMachine1Zone3, Score: 10}, - {Host: nodeMachine2Zone3, Score: 10}, - {Host: nodeMachine3Zone3, Score: 10}, + {Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority}, + {Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority}, + {Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority}, }, test: "two pods, 1 
matching (in z2)", }, @@ -473,7 +473,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { }, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{ - {Host: nodeMachine1Zone1, Score: 10}, + {Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority}, {Host: nodeMachine1Zone2, Score: 0}, // Pod on node {Host: nodeMachine2Zone2, Score: 0}, // Pod on node {Host: nodeMachine1Zone3, Score: 6}, // Pod in zone @@ -536,12 +536,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { // We would probably prefer to see a bigger gap between putting a second // pod on m1.z2 and putting a pod on m2.z2, but the ordering is correct. // This is also consistent with what we have already. - {Host: nodeMachine1Zone1, Score: 10}, // No pods in zone - {Host: nodeMachine1Zone2, Score: 5}, // Pod on node - {Host: nodeMachine2Zone2, Score: 6}, // Pod in zone - {Host: nodeMachine1Zone3, Score: 0}, // Two pods on node - {Host: nodeMachine2Zone3, Score: 3}, // Pod in zone - {Host: nodeMachine3Zone3, Score: 3}, // Pod in zone + {Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority}, // No pods in zone + {Host: nodeMachine1Zone2, Score: 5}, // Pod on node + {Host: nodeMachine2Zone2, Score: 6}, // Pod in zone + {Host: nodeMachine1Zone3, Score: 0}, // Two pods on node + {Host: nodeMachine2Zone3, Score: 3}, // Pod in zone + {Host: nodeMachine3Zone3, Score: 3}, // Pod in zone }, test: "Replication controller spreading (z1=0, z2=1, z3=2)", }, @@ -611,8 +611,8 @@ func TestZoneSpreadPriority(t *testing.T) { { pod: new(v1.Pod), nodes: labeledNodes, - expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10}, - {Host: "machine21", Score: 10}, {Host: "machine22", Score: 10}, + expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority}, + {Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: 
"machine22", Score: schedulerapi.MaxPriority}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, test: "nothing scheduled", }, @@ -620,8 +620,8 @@ func TestZoneSpreadPriority(t *testing.T) { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, pods: []*v1.Pod{{Spec: zone1Spec}}, nodes: labeledNodes, - expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10}, - {Host: "machine21", Score: 10}, {Host: "machine22", Score: 10}, + expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority}, + {Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, test: "no services", }, @@ -630,8 +630,8 @@ func TestZoneSpreadPriority(t *testing.T) { pods: []*v1.Pod{{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}}, nodes: labeledNodes, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}}, - expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10}, - {Host: "machine21", Score: 10}, {Host: "machine22", Score: 10}, + expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority}, + {Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, test: "different services", }, @@ -644,7 +644,7 @@ func TestZoneSpreadPriority(t *testing.T) { }, nodes: labeledNodes, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, - expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10}, + expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 
schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority}, {Host: "machine21", Score: 0}, {Host: "machine22", Score: 0}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, test: "three pods, one service pod", @@ -674,7 +674,7 @@ func TestZoneSpreadPriority(t *testing.T) { nodes: labeledNodes, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 0}, {Host: "machine12", Score: 0}, - {Host: "machine21", Score: 10}, {Host: "machine22", Score: 10}, + {Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, test: "three service label match pods in different namespaces", }, diff --git a/plugin/pkg/scheduler/algorithm/priorities/taint_toleration.go b/plugin/pkg/scheduler/algorithm/priorities/taint_toleration.go index 56b95453457..5817e4c89ce 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/taint_toleration.go +++ b/plugin/pkg/scheduler/algorithm/priorities/taint_toleration.go @@ -83,13 +83,10 @@ func ComputeTaintTolerationPriorityReduce(pod *v1.Pod, meta interface{}, nodeNam } maxCountFloat := float64(maxCount) - // The maximum priority value to give to a node - // Priority values range from 0 - maxPriority - const maxPriority = float64(10) for i := range result { - fScore := maxPriority + fScore := float64(schedulerapi.MaxPriority) if maxCountFloat > 0 { - fScore = (1.0 - float64(result[i].Score)/maxCountFloat) * 10 + fScore = (1.0 - float64(result[i].Score)/maxCountFloat) * float64(schedulerapi.MaxPriority) } if glog.V(10) { // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is diff --git a/plugin/pkg/scheduler/algorithm/priorities/taint_toleration_test.go b/plugin/pkg/scheduler/algorithm/priorities/taint_toleration_test.go index 
223d0c36c90..50e0b4d36f9 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/taint_toleration_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/taint_toleration_test.go @@ -78,7 +78,7 @@ func TestTaintAndToleration(t *testing.T) { }}), }, expectedList: []schedulerapi.HostPriority{ - {Host: "nodeA", Score: 10}, + {Host: "nodeA", Score: schedulerapi.MaxPriority}, {Host: "nodeB", Score: 0}, }, }, @@ -120,9 +120,9 @@ func TestTaintAndToleration(t *testing.T) { }), }, expectedList: []schedulerapi.HostPriority{ - {Host: "nodeA", Score: 10}, - {Host: "nodeB", Score: 10}, - {Host: "nodeC", Score: 10}, + {Host: "nodeA", Score: schedulerapi.MaxPriority}, + {Host: "nodeB", Score: schedulerapi.MaxPriority}, + {Host: "nodeC", Score: schedulerapi.MaxPriority}, }, }, // the count of taints on a node that are not tolerated by pod, matters. @@ -156,7 +156,7 @@ func TestTaintAndToleration(t *testing.T) { }), }, expectedList: []schedulerapi.HostPriority{ - {Host: "nodeA", Score: 10}, + {Host: "nodeA", Score: schedulerapi.MaxPriority}, {Host: "nodeB", Score: 5}, {Host: "nodeC", Score: 0}, }, @@ -199,8 +199,8 @@ func TestTaintAndToleration(t *testing.T) { }), }, expectedList: []schedulerapi.HostPriority{ - {Host: "nodeA", Score: 10}, - {Host: "nodeB", Score: 10}, + {Host: "nodeA", Score: schedulerapi.MaxPriority}, + {Host: "nodeB", Score: schedulerapi.MaxPriority}, {Host: "nodeC", Score: 0}, }, }, @@ -220,7 +220,7 @@ func TestTaintAndToleration(t *testing.T) { }), }, expectedList: []schedulerapi.HostPriority{ - {Host: "nodeA", Score: 10}, + {Host: "nodeA", Score: schedulerapi.MaxPriority}, {Host: "nodeB", Score: 0}, }, }, diff --git a/plugin/pkg/scheduler/api/validation/validation_test.go b/plugin/pkg/scheduler/api/validation/validation_test.go index 1e5f44ec91b..b7a6cd2ca9a 100644 --- a/plugin/pkg/scheduler/api/validation/validation_test.go +++ b/plugin/pkg/scheduler/api/validation/validation_test.go @@ -17,70 +17,60 @@ limitations under the License. 
package validation import ( + "errors" + "fmt" "testing" "k8s.io/kubernetes/plugin/pkg/scheduler/api" ) -func TestValidatePriorityWithNoWeight(t *testing.T) { - policy := api.Policy{Priorities: []api.PriorityPolicy{{Name: "NoWeightPriority"}}} - if ValidatePolicy(policy) == nil { - t.Errorf("Expected error about priority weight not being positive") - } -} - -func TestValidatePriorityWithZeroWeight(t *testing.T) { - policy := api.Policy{Priorities: []api.PriorityPolicy{{Name: "NoWeightPriority", Weight: 0}}} - if ValidatePolicy(policy) == nil { - t.Errorf("Expected error about priority weight not being positive") - } -} - -func TestValidatePriorityWithNonZeroWeight(t *testing.T) { - policy := api.Policy{Priorities: []api.PriorityPolicy{{Name: "WeightPriority", Weight: 2}}} - errs := ValidatePolicy(policy) - if errs != nil { - t.Errorf("Unexpected errors %v", errs) - } -} - -func TestValidatePriorityWithNegativeWeight(t *testing.T) { - policy := api.Policy{Priorities: []api.PriorityPolicy{{Name: "WeightPriority", Weight: -2}}} - if ValidatePolicy(policy) == nil { - t.Errorf("Expected error about priority weight not being positive") - } -} - -func TestValidatePriorityWithOverFlowWeight(t *testing.T) { - policy := api.Policy{Priorities: []api.PriorityPolicy{{Name: "WeightPriority", Weight: api.MaxWeight}}} - if ValidatePolicy(policy) == nil { - t.Errorf("Expected error about priority weight not being overflown.") - } -} - -func TestValidateExtenderWithNonNegativeWeight(t *testing.T) { - extenderPolicy := api.Policy{ExtenderConfigs: []api.ExtenderConfig{{URLPrefix: "http://127.0.0.1:8081/extender", FilterVerb: "filter", Weight: 2}}} - errs := ValidatePolicy(extenderPolicy) - if errs != nil { - t.Errorf("Unexpected errors %v", errs) - } -} - -func TestValidateExtenderWithNegativeWeight(t *testing.T) { - extenderPolicy := api.Policy{ExtenderConfigs: []api.ExtenderConfig{{URLPrefix: "http://127.0.0.1:8081/extender", FilterVerb: "filter", Weight: -2}}} - if 
ValidatePolicy(extenderPolicy) == nil { - t.Errorf("Expected error about priority weight for extender not being positive") - } -} - -func TestValidateMultipleExtendersWithBind(t *testing.T) { - extenderPolicy := api.Policy{ - ExtenderConfigs: []api.ExtenderConfig{ - {URLPrefix: "http://127.0.0.1:8081/extender", BindVerb: "bind"}, - {URLPrefix: "http://127.0.0.1:8082/extender", BindVerb: "bind"}, +func TestValidatePolicy(t *testing.T) { + tests := []struct { + policy api.Policy + expected error + }{ + { + policy: api.Policy{Priorities: []api.PriorityPolicy{{Name: "NoWeightPriority"}}}, + expected: errors.New("Priority NoWeightPriority should have a positive weight applied to it or it has overflown"), + }, + { + policy: api.Policy{Priorities: []api.PriorityPolicy{{Name: "NoWeightPriority", Weight: 0}}}, + expected: errors.New("Priority NoWeightPriority should have a positive weight applied to it or it has overflown"), + }, + { + policy: api.Policy{Priorities: []api.PriorityPolicy{{Name: "WeightPriority", Weight: 2}}}, + expected: nil, + }, + { + policy: api.Policy{Priorities: []api.PriorityPolicy{{Name: "WeightPriority", Weight: -2}}}, + expected: errors.New("Priority WeightPriority should have a positive weight applied to it or it has overflown"), + }, + { + policy: api.Policy{Priorities: []api.PriorityPolicy{{Name: "WeightPriority", Weight: api.MaxWeight}}}, + expected: errors.New("Priority WeightPriority should have a positive weight applied to it or it has overflown"), + }, + { + policy: api.Policy{ExtenderConfigs: []api.ExtenderConfig{{URLPrefix: "http://127.0.0.1:8081/extender", FilterVerb: "filter", Weight: 2}}}, + expected: nil, + }, + { + policy: api.Policy{ExtenderConfigs: []api.ExtenderConfig{{URLPrefix: "http://127.0.0.1:8081/extender", FilterVerb: "filter", Weight: -2}}}, + expected: errors.New("Priority for extender http://127.0.0.1:8081/extender should have a positive weight applied to it"), + }, + { + policy: api.Policy{ + ExtenderConfigs: 
[]api.ExtenderConfig{ + {URLPrefix: "http://127.0.0.1:8081/extender", BindVerb: "bind", Weight: 2}, + {URLPrefix: "http://127.0.0.1:8082/extender", BindVerb: "bind", Weight: 2}, + }}, + expected: errors.New("Only one extender can implement bind, found 2"), }, } - if ValidatePolicy(extenderPolicy) == nil { - t.Errorf("Expected failure when multiple extenders with bind") + + for _, test := range tests { + actual := ValidatePolicy(test.policy) + if fmt.Sprint(test.expected) != fmt.Sprint(actual) { + t.Errorf("expected: %s, actual: %s", test.expected, actual) + } } } diff --git a/plugin/pkg/scheduler/core/equivalence_cache_test.go b/plugin/pkg/scheduler/core/equivalence_cache_test.go index ca2ce443b36..5f58a798119 100644 --- a/plugin/pkg/scheduler/core/equivalence_cache_test.go +++ b/plugin/pkg/scheduler/core/equivalence_cache_test.go @@ -29,31 +29,55 @@ import ( func TestUpdateCachedPredicateItem(t *testing.T) { tests := []struct { - name string - pod *v1.Pod - predicateKey string - nodeName string - fit bool - reasons []algorithm.PredicateFailureReason - equivalenceHash uint64 - expectCacheItem HostPredicate + name string + pod *v1.Pod + predicateKey string + nodeName string + fit bool + reasons []algorithm.PredicateFailureReason + equivalenceHash uint64 + expectPredicateMap bool + expectCacheItem HostPredicate }{ { - name: "test 1", - pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "testPod"}}, - predicateKey: "GeneralPredicates", - nodeName: "node1", - fit: true, - equivalenceHash: 123, + name: "test 1", + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "testPod"}}, + predicateKey: "GeneralPredicates", + nodeName: "node1", + fit: true, + equivalenceHash: 123, + expectPredicateMap: false, expectCacheItem: HostPredicate{ Fit: true, }, }, + { + name: "test 2", + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "testPod"}}, + predicateKey: "GeneralPredicates", + nodeName: "node2", + fit: false, + equivalenceHash: 123, + expectPredicateMap: true, + expectCacheItem: 
HostPredicate{ + Fit: false, + }, + }, } for _, test := range tests { // this case does not need to calculate equivalence hash, just pass an empty function fakeGetEquivalencePodFunc := func(pod *v1.Pod) interface{} { return nil } ecache := NewEquivalenceCache(fakeGetEquivalencePodFunc) + if test.expectPredicateMap { + ecache.algorithmCache[test.nodeName] = newAlgorithmCache() + predicateItem := HostPredicate{ + Fit: true, + } + ecache.algorithmCache[test.nodeName].predicatesCache.Add(test.predicateKey, + PredicateMap{ + test.equivalenceHash: predicateItem, + }) + } ecache.UpdateCachedPredicateItem(test.pod, test.nodeName, test.predicateKey, test.fit, test.reasons, test.equivalenceHash) value, ok := ecache.algorithmCache[test.nodeName].predicatesCache.Get(test.predicateKey) @@ -73,28 +97,82 @@ type predicateItemType struct { reasons []algorithm.PredicateFailureReason } -func TestInvalidateCachedPredicateItem(t *testing.T) { +func TestCachedPredicateItem(t *testing.T) { tests := []struct { - name string - pod *v1.Pod - nodeName string - predicateKey string - equivalenceHash uint64 - cachedItem predicateItemType - expectedInvalid bool - expectedPredicateItem predicateItemType + name string + pod *v1.Pod + nodeName string + predicateKey string + equivalenceHashForUpdatePredicate uint64 + equivalenceHashForCalPredicate uint64 + cachedItem predicateItemType + expectedInvalidPredicateKey bool + expectedInvalidEquivalenceHash bool + expectedPredicateItem predicateItemType }{ { - name: "test 1", - pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "testPod"}}, - nodeName: "node1", - equivalenceHash: 123, - predicateKey: "GeneralPredicates", + name: "test 1", + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "testPod"}}, + nodeName: "node1", + equivalenceHashForUpdatePredicate: 123, + equivalenceHashForCalPredicate: 123, + predicateKey: "GeneralPredicates", cachedItem: predicateItemType{ fit: false, reasons: 
[]algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}, }, - expectedInvalid: true, + expectedInvalidPredicateKey: true, + expectedPredicateItem: predicateItemType{ + fit: false, + reasons: []algorithm.PredicateFailureReason{}, + }, + }, + { + name: "test 2", + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "testPod"}}, + nodeName: "node2", + equivalenceHashForUpdatePredicate: 123, + equivalenceHashForCalPredicate: 123, + predicateKey: "GeneralPredicates", + cachedItem: predicateItemType{ + fit: true, + }, + expectedInvalidPredicateKey: false, + expectedPredicateItem: predicateItemType{ + fit: true, + reasons: []algorithm.PredicateFailureReason{}, + }, + }, + { + name: "test 3", + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "testPod"}}, + nodeName: "node3", + equivalenceHashForUpdatePredicate: 123, + equivalenceHashForCalPredicate: 123, + predicateKey: "GeneralPredicates", + cachedItem: predicateItemType{ + fit: false, + reasons: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}, + }, + expectedInvalidPredicateKey: false, + expectedPredicateItem: predicateItemType{ + fit: false, + reasons: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}, + }, + }, + { + name: "test 4", + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "testPod"}}, + nodeName: "node4", + equivalenceHashForUpdatePredicate: 123, + equivalenceHashForCalPredicate: 456, + predicateKey: "GeneralPredicates", + cachedItem: predicateItemType{ + fit: false, + reasons: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}, + }, + expectedInvalidPredicateKey: false, + expectedInvalidEquivalenceHash: true, expectedPredicateItem: predicateItemType{ fit: false, reasons: []algorithm.PredicateFailureReason{}, @@ -107,18 +185,24 @@ func TestInvalidateCachedPredicateItem(t *testing.T) { fakeGetEquivalencePodFunc := func(pod *v1.Pod) interface{} { return nil } ecache := NewEquivalenceCache(fakeGetEquivalencePodFunc) // set cached item to 
equivalence cache - ecache.UpdateCachedPredicateItem(test.pod, test.nodeName, test.predicateKey, test.cachedItem.fit, test.cachedItem.reasons, test.equivalenceHash) + ecache.UpdateCachedPredicateItem(test.pod, test.nodeName, test.predicateKey, test.cachedItem.fit, test.cachedItem.reasons, test.equivalenceHashForUpdatePredicate) // if we want to do invalid, invalid the cached item - if test.expectedInvalid { + if test.expectedInvalidPredicateKey { predicateKeys := sets.NewString() predicateKeys.Insert(test.predicateKey) ecache.InvalidateCachedPredicateItem(test.nodeName, predicateKeys) } // calculate predicate with equivalence cache - fit, reasons, invalid := ecache.PredicateWithECache(test.pod, test.nodeName, test.predicateKey, test.equivalenceHash) - // returned invalid should match expectedInvalid - if invalid != test.expectedInvalid { - t.Errorf("Failed : %s, expected invalid: %v, but got: %v", test.name, test.expectedInvalid, invalid) + fit, reasons, invalid := ecache.PredicateWithECache(test.pod, test.nodeName, test.predicateKey, test.equivalenceHashForCalPredicate) + // returned invalid should match expectedInvalidPredicateKey or expectedInvalidEquivalenceHash + if test.equivalenceHashForUpdatePredicate != test.equivalenceHashForCalPredicate { + if invalid != test.expectedInvalidEquivalenceHash { + t.Errorf("Failed : %s when using invalid equivalenceHash, expected invalid: %v, but got: %v", test.name, test.expectedInvalidEquivalenceHash, invalid) + } + } else { + if invalid != test.expectedInvalidPredicateKey { + t.Errorf("Failed : %s, expected invalid: %v, but got: %v", test.name, test.expectedInvalidPredicateKey, invalid) + } } // returned predicate result should match expected predicate item if fit != test.expectedPredicateItem.fit { diff --git a/plugin/pkg/scheduler/core/extender.go b/plugin/pkg/scheduler/core/extender.go index 6c24cf00cd3..898ef4f4525 100644 --- a/plugin/pkg/scheduler/core/extender.go +++ b/plugin/pkg/scheduler/core/extender.go @@ -20,8 
+20,8 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" "net/http" + "strings" "time" "k8s.io/api/core/v1" @@ -229,7 +229,7 @@ func (h *HTTPExtender) send(action string, args interface{}, result interface{}) return err } - url := h.extenderURL + "/" + action + url := strings.TrimRight(h.extenderURL, "/") + "/" + action req, err := http.NewRequest("POST", url, bytes.NewReader(out)) if err != nil { @@ -242,19 +242,11 @@ func (h *HTTPExtender) send(action string, args interface{}, result interface{}) if err != nil { return err } + defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return fmt.Errorf("Failed %v with extender at URL %v, code %v", action, h.extenderURL, resp.StatusCode) } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - if err := json.Unmarshal(body, result); err != nil { - return err - } - return nil + return json.NewDecoder(resp.Body).Decode(result) } diff --git a/plugin/pkg/scheduler/core/generic_scheduler.go b/plugin/pkg/scheduler/core/generic_scheduler.go index ea36e73bd95..68ab9b389eb 100644 --- a/plugin/pkg/scheduler/core/generic_scheduler.go +++ b/plugin/pkg/scheduler/core/generic_scheduler.go @@ -44,7 +44,7 @@ type FitError struct { var ErrNoNodesAvailable = fmt.Errorf("no nodes available to schedule pods") -const NoNodeAvailableMsg = "No nodes are available that match all of the following predicates:" +const NoNodeAvailableMsg = "No nodes are available that match all of the following predicates" // Error returns detailed information of why the pod failed to fit on each node func (f *FitError) Error() string { diff --git a/plugin/pkg/scheduler/core/generic_scheduler_test.go b/plugin/pkg/scheduler/core/generic_scheduler_test.go index 55e06bb0f6e..5255b12ff7e 100644 --- a/plugin/pkg/scheduler/core/generic_scheduler_test.go +++ b/plugin/pkg/scheduler/core/generic_scheduler_test.go @@ -410,7 +410,7 @@ func TestHumanReadableFitError(t *testing.T) { "3": 
[]algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure}, }, } - if strings.Contains(error.Error(), "No nodes are available that match all of the following predicates") { + if strings.Contains(error.Error(), NoNodeAvailableMsg) { if strings.Contains(error.Error(), "NodeUnderDiskPressure (2)") && strings.Contains(error.Error(), "NodeUnderMemoryPressure (1)") { return } diff --git a/plugin/pkg/scheduler/factory/BUILD b/plugin/pkg/scheduler/factory/BUILD index b15b4e885b8..8cc5d9d9442 100644 --- a/plugin/pkg/scheduler/factory/BUILD +++ b/plugin/pkg/scheduler/factory/BUILD @@ -57,7 +57,6 @@ go_test( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/informers/informers_generated/externalversions:go_default_library", diff --git a/plugin/pkg/scheduler/factory/factory_test.go b/plugin/pkg/scheduler/factory/factory_test.go index 6bc4c225b66..290ab049416 100644 --- a/plugin/pkg/scheduler/factory/factory_test.go +++ b/plugin/pkg/scheduler/factory/factory_test.go @@ -30,7 +30,6 @@ import ( "k8s.io/client-go/tools/cache" utiltesting "k8s.io/client-go/util/testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/testapi" apitesting "k8s.io/kubernetes/pkg/api/testing" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions" @@ -244,13 +243,13 @@ func TestDefaultErrorFunc(t *testing.T) { } handler := utiltesting.FakeHandler{ StatusCode: 200, - ResponseBody: runtime.EncodeOrDie(testapi.Default.Codec(), testPod), + ResponseBody: runtime.EncodeOrDie(util.Test.Codec(), testPod), T: t, } mux := http.NewServeMux() // FakeHandler musn't be sent requests other than the one you want to test. 
- mux.Handle(testapi.Default.ResourcePath("pods", "bar", "foo"), &handler) + mux.Handle(util.Test.ResourcePath("pods", "bar", "foo"), &handler) server := httptest.NewServer(mux) defer server.Close() client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) @@ -282,7 +281,7 @@ func TestDefaultErrorFunc(t *testing.T) { if !exists { continue } - handler.ValidateRequest(t, testapi.Default.ResourcePath("pods", "bar", "foo"), "GET", nil) + handler.ValidateRequest(t, util.Test.ResourcePath("pods", "bar", "foo"), "GET", nil) if e, a := testPod, got; !reflect.DeepEqual(e, a) { t.Errorf("Expected %v, got %v", e, a) } @@ -344,9 +343,9 @@ func TestBind(t *testing.T) { t.Errorf("Unexpected error: %v", err) continue } - expectedBody := runtime.EncodeOrDie(testapi.Default.Codec(), item.binding) + expectedBody := runtime.EncodeOrDie(util.Test.Codec(), item.binding) handler.ValidateRequest(t, - testapi.Default.SubResourcePath("pods", metav1.NamespaceDefault, "foo", "binding"), + util.Test.SubResourcePath("pods", metav1.NamespaceDefault, "foo", "binding"), "POST", &expectedBody) } } diff --git a/plugin/pkg/scheduler/scheduler.go b/plugin/pkg/scheduler/scheduler.go index 9105f79a98d..9e67bfc0b75 100644 --- a/plugin/pkg/scheduler/scheduler.go +++ b/plugin/pkg/scheduler/scheduler.go @@ -17,6 +17,7 @@ limitations under the License. package scheduler import ( + "fmt" "time" "k8s.io/api/core/v1" @@ -36,8 +37,6 @@ import ( "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" "k8s.io/kubernetes/plugin/pkg/scheduler/util" - "fmt" - "github.com/golang/glog" ) @@ -220,9 +219,6 @@ func (sched *Scheduler) bind(assumed *v1.Pod, b *v1.Binding) error { // If binding succeeded then PodScheduled condition will be updated in apiserver so that // it's atomic with setting host. 
err := sched.config.Binder.Bind(b) - if err := sched.config.SchedulerCache.FinishBinding(assumed); err != nil { - return fmt.Errorf("scheduler cache FinishBinding failed: %v", err) - } if err != nil { glog.V(1).Infof("Failed to bind pod: %v/%v", assumed.Namespace, assumed.Name) if err := sched.config.SchedulerCache.ForgetPod(assumed); err != nil { @@ -237,6 +233,11 @@ func (sched *Scheduler) bind(assumed *v1.Pod, b *v1.Binding) error { }) return err } + + if err := sched.config.SchedulerCache.FinishBinding(assumed); err != nil { + return fmt.Errorf("scheduler cache FinishBinding failed: %v", err) + } + metrics.BindingLatency.Observe(metrics.SinceInMicroseconds(bindingStart)) sched.config.Recorder.Eventf(assumed, v1.EventTypeNormal, "Scheduled", "Successfully assigned %v to %v", assumed.Name, b.Target.Name) return nil diff --git a/plugin/pkg/scheduler/scheduler_test.go b/plugin/pkg/scheduler/scheduler_test.go index 069ce956290..e5195440d94 100644 --- a/plugin/pkg/scheduler/scheduler_test.go +++ b/plugin/pkg/scheduler/scheduler_test.go @@ -33,12 +33,12 @@ import ( clientcache "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/plugin/pkg/scheduler/core" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing" + "k8s.io/kubernetes/plugin/pkg/scheduler/util" ) type fakeBinder struct { @@ -55,7 +55,7 @@ func (fc fakePodConditionUpdater) Update(pod *v1.Pod, podCondition *v1.PodCondit func podWithID(id, desiredHost string) *v1.Pod { return &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: id, SelfLink: testapi.Default.SelfLink("pods", id)}, + ObjectMeta: metav1.ObjectMeta{Name: id, SelfLink: util.Test.SelfLink("pods", id)}, Spec: v1.PodSpec{ NodeName: desiredHost, }, @@ -65,7 +65,7 @@ func podWithID(id, 
desiredHost string) *v1.Pod { func deletingPod(id string) *v1.Pod { deletionTimestamp := metav1.Now() return &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: id, SelfLink: testapi.Default.SelfLink("pods", id), DeletionTimestamp: &deletionTimestamp}, + ObjectMeta: metav1.ObjectMeta{Name: id, SelfLink: util.Test.SelfLink("pods", id), DeletionTimestamp: &deletionTimestamp}, Spec: v1.PodSpec{ NodeName: "", }, diff --git a/plugin/pkg/scheduler/schedulercache/BUILD b/plugin/pkg/scheduler/schedulercache/BUILD index 8b9ba120d28..ce68ac02f72 100644 --- a/plugin/pkg/scheduler/schedulercache/BUILD +++ b/plugin/pkg/scheduler/schedulercache/BUILD @@ -35,7 +35,6 @@ go_test( library = ":go_default_library", tags = ["automanaged"], deps = [ - "//pkg/api/v1/helper:go_default_library", "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", "//plugin/pkg/scheduler/util:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/plugin/pkg/scheduler/schedulercache/cache_test.go b/plugin/pkg/scheduler/schedulercache/cache_test.go index 549a41edddb..ea59ea42cec 100644 --- a/plugin/pkg/scheduler/schedulercache/cache_test.go +++ b/plugin/pkg/scheduler/schedulercache/cache_test.go @@ -26,7 +26,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" schedutil "k8s.io/kubernetes/plugin/pkg/scheduler/util" ) @@ -507,76 +506,32 @@ func TestForgetPod(t *testing.T) { } } -// addResource adds ResourceList into Resource. 
-func addResource(r *Resource, rl v1.ResourceList) { - if r == nil { - return - } - - for rName, rQuant := range rl { - switch rName { - case v1.ResourceCPU: - r.MilliCPU += rQuant.MilliValue() - case v1.ResourceMemory: - r.Memory += rQuant.Value() - case v1.ResourceNvidiaGPU: - r.NvidiaGPU += rQuant.Value() - default: - if v1helper.IsOpaqueIntResourceName(rName) { - r.AddOpaque(rName, rQuant.Value()) - } - } - } -} - // getResourceRequest returns the resource request of all containers in Pods; // excuding initContainers. func getResourceRequest(pod *v1.Pod) v1.ResourceList { result := &Resource{} for _, container := range pod.Spec.Containers { - addResource(result, container.Resources.Requests) + result.Add(container.Resources.Requests) } return result.ResourceList() } -// newResource returns a new Resource by ResourceList. -func newResource(rl v1.ResourceList) *Resource { - res := &Resource{} - - for rName, rQuantity := range rl { - switch rName { - case v1.ResourceMemory: - res.Memory = rQuantity.Value() - case v1.ResourceCPU: - res.MilliCPU = rQuantity.MilliValue() - case v1.ResourceNvidiaGPU: - res.NvidiaGPU += rQuantity.Value() - default: - if v1helper.IsOpaqueIntResourceName(rName) { - res.SetOpaque(rName, rQuantity.Value()) - } - } - } - - return res -} - // buildNodeInfo creates a NodeInfo by simulating node operations in cache. func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *NodeInfo { expected := NewNodeInfo() // Simulate SetNode. 
expected.node = node - expected.allocatableResource = newResource(node.Status.Allocatable) + expected.allocatableResource = NewResource(node.Status.Allocatable) expected.taints = node.Spec.Taints expected.generation++ for _, pod := range pods { // Simulate AddPod expected.pods = append(expected.pods, pod) - addResource(expected.requestedResource, getResourceRequest(pod)) - addResource(expected.nonzeroRequest, getResourceRequest(pod)) + expected.requestedResource.Add(getResourceRequest(pod)) + expected.nonzeroRequest.Add(getResourceRequest(pod)) expected.usedPorts = schedutil.GetUsedPorts(pod) expected.generation++ } diff --git a/plugin/pkg/scheduler/schedulercache/node_info.go b/plugin/pkg/scheduler/schedulercache/node_info.go index 386822adffc..5d144f69f9f 100644 --- a/plugin/pkg/scheduler/schedulercache/node_info.go +++ b/plugin/pkg/scheduler/schedulercache/node_info.go @@ -47,9 +47,6 @@ type NodeInfo struct { // We store allocatedResources (which is Node.Status.Allocatable.*) explicitly // as int64, to avoid conversions and accessing map. allocatableResource *Resource - // We store allowedPodNumber (which is Node.Status.Allocatable.Pods().Value()) - // explicitly as int, to avoid conversions and improve performance. - allowedPodNumber int // Cached tains of the node for faster lookup. taints []v1.Taint @@ -66,20 +63,60 @@ type NodeInfo struct { // Resource is a collection of compute resource. type Resource struct { - MilliCPU int64 - Memory int64 - NvidiaGPU int64 - StorageScratch int64 - StorageOverlay int64 + MilliCPU int64 + Memory int64 + NvidiaGPU int64 + StorageScratch int64 + StorageOverlay int64 + // We store allowedPodNumber (which is Node.Status.Allocatable.Pods().Value()) + // explicitly as int, to avoid conversions and improve performance. 
+ AllowedPodNumber int OpaqueIntResources map[v1.ResourceName]int64 } +// New creates a Resource from ResourceList +func NewResource(rl v1.ResourceList) *Resource { + r := &Resource{} + r.Add(rl) + return r +} + +// Add adds ResourceList into Resource. +func (r *Resource) Add(rl v1.ResourceList) { + if r == nil { + return + } + + for rName, rQuant := range rl { + switch rName { + case v1.ResourceCPU: + r.MilliCPU += rQuant.MilliValue() + case v1.ResourceMemory: + r.Memory += rQuant.Value() + case v1.ResourceNvidiaGPU: + r.NvidiaGPU += rQuant.Value() + case v1.ResourcePods: + r.AllowedPodNumber += int(rQuant.Value()) + case v1.ResourceStorage: + r.StorageScratch += rQuant.Value() + case v1.ResourceStorageOverlay: + r.StorageOverlay += rQuant.Value() + default: + if v1helper.IsOpaqueIntResourceName(rName) { + r.AddOpaque(rName, rQuant.Value()) + } + } + } +} + func (r *Resource) ResourceList() v1.ResourceList { result := v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(r.Memory, resource.BinarySI), v1.ResourceNvidiaGPU: *resource.NewQuantity(r.NvidiaGPU, resource.DecimalSI), + v1.ResourcePods: *resource.NewQuantity(int64(r.AllowedPodNumber), resource.BinarySI), v1.ResourceStorageOverlay: *resource.NewQuantity(r.StorageOverlay, resource.BinarySI), + v1.ResourceStorage: *resource.NewQuantity(r.StorageScratch, resource.BinarySI), } for rName, rQuant := range r.OpaqueIntResources { result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI) @@ -89,11 +126,12 @@ func (r *Resource) ResourceList() v1.ResourceList { func (r *Resource) Clone() *Resource { res := &Resource{ - MilliCPU: r.MilliCPU, - Memory: r.Memory, - NvidiaGPU: r.NvidiaGPU, - StorageOverlay: r.StorageOverlay, - StorageScratch: r.StorageScratch, + MilliCPU: r.MilliCPU, + Memory: r.Memory, + NvidiaGPU: r.NvidiaGPU, + AllowedPodNumber: r.AllowedPodNumber, + StorageOverlay: r.StorageOverlay, + StorageScratch: 
r.StorageScratch, } if r.OpaqueIntResources != nil { res.OpaqueIntResources = make(map[v1.ResourceName]int64) @@ -124,7 +162,6 @@ func NewNodeInfo(pods ...*v1.Pod) *NodeInfo { requestedResource: &Resource{}, nonzeroRequest: &Resource{}, allocatableResource: &Resource{}, - allowedPodNumber: 0, generation: 0, usedPorts: make(map[int]bool), } @@ -166,10 +203,10 @@ func (n *NodeInfo) PodsWithAffinity() []*v1.Pod { } func (n *NodeInfo) AllowedPodNumber() int { - if n == nil { + if n == nil || n.allocatableResource == nil { return 0 } - return n.allowedPodNumber + return n.allocatableResource.AllowedPodNumber } func (n *NodeInfo) Taints() ([]v1.Taint, error) { @@ -223,7 +260,6 @@ func (n *NodeInfo) Clone() *NodeInfo { requestedResource: n.requestedResource.Clone(), nonzeroRequest: n.nonzeroRequest.Clone(), allocatableResource: n.allocatableResource.Clone(), - allowedPodNumber: n.allowedPodNumber, taintsErr: n.taintsErr, memoryPressureCondition: n.memoryPressureCondition, diskPressureCondition: n.diskPressureCondition, @@ -253,7 +289,8 @@ func (n *NodeInfo) String() string { for i, pod := range n.pods { podKeys[i] = pod.Name } - return fmt.Sprintf("&NodeInfo{Pods:%v, RequestedResource:%#v, NonZeroRequest: %#v, UsedPort: %#v}", podKeys, n.requestedResource, n.nonzeroRequest, n.usedPorts) + return fmt.Sprintf("&NodeInfo{Pods:%v, RequestedResource:%#v, NonZeroRequest: %#v, UsedPort: %#v, AllocatableResource:%#v}", + podKeys, n.requestedResource, n.nonzeroRequest, n.usedPorts, n.allocatableResource) } func hasPodAffinityConstraints(pod *v1.Pod) bool { @@ -345,23 +382,9 @@ func (n *NodeInfo) removePod(pod *v1.Pod) error { } func calculateResource(pod *v1.Pod) (res Resource, non0_cpu int64, non0_mem int64) { + resPtr := &res for _, c := range pod.Spec.Containers { - for rName, rQuant := range c.Resources.Requests { - switch rName { - case v1.ResourceCPU: - res.MilliCPU += rQuant.MilliValue() - case v1.ResourceMemory: - res.Memory += rQuant.Value() - case v1.ResourceNvidiaGPU: - 
res.NvidiaGPU += rQuant.Value() - case v1.ResourceStorageOverlay: - res.StorageOverlay += rQuant.Value() - default: - if v1helper.IsOpaqueIntResourceName(rName) { - res.AddOpaque(rName, rQuant.Value()) - } - } - } + resPtr.Add(c.Resources.Requests) non0_cpu_req, non0_mem_req := priorityutil.GetNonzeroRequests(&c.Resources.Requests) non0_cpu += non0_cpu_req @@ -397,26 +420,9 @@ func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, used bool) { // Sets the overall node information. func (n *NodeInfo) SetNode(node *v1.Node) error { n.node = node - for rName, rQuant := range node.Status.Allocatable { - switch rName { - case v1.ResourceCPU: - n.allocatableResource.MilliCPU = rQuant.MilliValue() - case v1.ResourceMemory: - n.allocatableResource.Memory = rQuant.Value() - case v1.ResourceNvidiaGPU: - n.allocatableResource.NvidiaGPU = rQuant.Value() - case v1.ResourcePods: - n.allowedPodNumber = int(rQuant.Value()) - case v1.ResourceStorage: - n.allocatableResource.StorageScratch = rQuant.Value() - case v1.ResourceStorageOverlay: - n.allocatableResource.StorageOverlay = rQuant.Value() - default: - if v1helper.IsOpaqueIntResourceName(rName) { - n.allocatableResource.SetOpaque(rName, rQuant.Value()) - } - } - } + + n.allocatableResource = NewResource(node.Status.Allocatable) + n.taints = node.Spec.Taints for i := range node.Status.Conditions { cond := &node.Status.Conditions[i] @@ -441,7 +447,6 @@ func (n *NodeInfo) RemoveNode(node *v1.Node) error { // node removal. This is handled correctly in cache.go file. 
n.node = nil n.allocatableResource = &Resource{} - n.allowedPodNumber = 0 n.taints, n.taintsErr = nil, nil n.memoryPressureCondition = v1.ConditionUnknown n.diskPressureCondition = v1.ConditionUnknown diff --git a/plugin/pkg/scheduler/util/BUILD b/plugin/pkg/scheduler/util/BUILD index 824f7d5bef8..7bba7524005 100644 --- a/plugin/pkg/scheduler/util/BUILD +++ b/plugin/pkg/scheduler/util/BUILD @@ -20,12 +20,17 @@ go_library( name = "go_default_library", srcs = [ "backoff_utils.go", + "testutil.go", "utils.go", ], tags = ["automanaged"], deps = [ + "//pkg/api:go_default_library", + "//pkg/api/install:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", ], ) diff --git a/plugin/pkg/scheduler/util/backoff_utils.go b/plugin/pkg/scheduler/util/backoff_utils.go index 90566bbfbaf..9226f4cca5b 100644 --- a/plugin/pkg/scheduler/util/backoff_utils.go +++ b/plugin/pkg/scheduler/util/backoff_utils.go @@ -18,11 +18,12 @@ package util import ( "fmt" - "github.com/golang/glog" - ktypes "k8s.io/apimachinery/pkg/types" "sync" "sync/atomic" "time" + + "github.com/golang/glog" + ktypes "k8s.io/apimachinery/pkg/types" ) type clock interface { @@ -74,7 +75,7 @@ func (entry *backoffEntry) getBackoff(maxDuration time.Duration) time.Duration { newDuration = maxDuration } entry.backoff = newDuration - glog.V(4).Infof("Backing off %s for pod %+v", duration.String(), entry) + glog.V(4).Infof("Backing off %s", duration.String()) return duration } diff --git a/plugin/pkg/scheduler/util/testutil.go b/plugin/pkg/scheduler/util/testutil.go new file mode 100644 index 00000000000..8b5ef89e830 --- /dev/null +++ b/plugin/pkg/scheduler/util/testutil.go @@ -0,0 +1,168 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "mime" + "os" + "reflect" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/pkg/api" + + _ "k8s.io/kubernetes/pkg/api/install" +) + +type TestGroup struct { + externalGroupVersion schema.GroupVersion + internalGroupVersion schema.GroupVersion + internalTypes map[string]reflect.Type + externalTypes map[string]reflect.Type +} + +var ( + Groups = make(map[string]TestGroup) + Test TestGroup + + serializer runtime.SerializerInfo +) + +func init() { + if apiMediaType := os.Getenv("KUBE_TEST_API_TYPE"); len(apiMediaType) > 0 { + var ok bool + mediaType, _, err := mime.ParseMediaType(apiMediaType) + if err != nil { + panic(err) + } + serializer, ok = runtime.SerializerInfoForMediaType(api.Codecs.SupportedMediaTypes(), mediaType) + if !ok { + panic(fmt.Sprintf("no serializer for %s", apiMediaType)) + } + } + + kubeTestAPI := os.Getenv("KUBE_TEST_API") + if len(kubeTestAPI) != 0 { + // priority is "first in list preferred", so this has to run in reverse order + testGroupVersions := strings.Split(kubeTestAPI, ",") + for i := len(testGroupVersions) - 1; i >= 0; i-- { + gvString := testGroupVersions[i] + groupVersion, err := schema.ParseGroupVersion(gvString) + if err != nil { + panic(fmt.Sprintf("Error parsing groupversion %v: %v", gvString, err)) + } + + internalGroupVersion := schema.GroupVersion{Group: groupVersion.Group, Version: 
runtime.APIVersionInternal} + Groups[groupVersion.Group] = TestGroup{ + externalGroupVersion: groupVersion, + internalGroupVersion: internalGroupVersion, + internalTypes: api.Scheme.KnownTypes(internalGroupVersion), + externalTypes: api.Scheme.KnownTypes(groupVersion), + } + } + } + + if _, ok := Groups[api.GroupName]; !ok { + externalGroupVersion := schema.GroupVersion{Group: api.GroupName, Version: api.Registry.GroupOrDie(api.GroupName).GroupVersion.Version} + Groups[api.GroupName] = TestGroup{ + externalGroupVersion: externalGroupVersion, + internalGroupVersion: api.SchemeGroupVersion, + internalTypes: api.Scheme.KnownTypes(api.SchemeGroupVersion), + externalTypes: api.Scheme.KnownTypes(externalGroupVersion), + } + } + + Test = Groups[api.GroupName] +} + +// Codec returns the codec for the API version to test against, as set by the +// KUBE_TEST_API_TYPE env var. +func (g TestGroup) Codec() runtime.Codec { + if serializer.Serializer == nil { + return api.Codecs.LegacyCodec(g.externalGroupVersion) + } + return api.Codecs.CodecForVersions(serializer.Serializer, api.Codecs.UniversalDeserializer(), schema.GroupVersions{g.externalGroupVersion}, nil) +} + +// SelfLink returns a self link that will appear to be for the version Version(). +// 'resource' should be the resource path, e.g. "pods" for the Pod type. 'name' should be +// empty for lists. 
+func (g TestGroup) SelfLink(resource, name string) string { + if g.externalGroupVersion.Group == api.GroupName { + if name == "" { + return fmt.Sprintf("/api/%s/%s", g.externalGroupVersion.Version, resource) + } + return fmt.Sprintf("/api/%s/%s/%s", g.externalGroupVersion.Version, resource, name) + } + + // TODO: will need a /apis prefix once we have proper multi-group + // support + if name == "" { + return fmt.Sprintf("/apis/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource) + } + return fmt.Sprintf("/apis/%s/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource, name) +} + +// ResourcePathWithPrefix returns the appropriate path for the given prefix (watch, proxy, redirect, etc), resource, namespace and name. +// For ex, this is of the form: +// /api/v1/watch/namespaces/foo/pods/pod0 for v1. +func (g TestGroup) ResourcePathWithPrefix(prefix, resource, namespace, name string) string { + var path string + if g.externalGroupVersion.Group == api.GroupName { + path = "/api/" + g.externalGroupVersion.Version + } else { + // TODO: switch back once we have proper multiple group support + // path = "/apis/" + g.Group + "/" + Version(group...) + path = "/apis/" + g.externalGroupVersion.Group + "/" + g.externalGroupVersion.Version + } + + if prefix != "" { + path = path + "/" + prefix + } + if namespace != "" { + path = path + "/namespaces/" + namespace + } + // Resource names are lower case. + resource = strings.ToLower(resource) + if resource != "" { + path = path + "/" + resource + } + if name != "" { + path = path + "/" + name + } + return path +} + +// ResourcePath returns the appropriate path for the given resource, namespace and name. +// For example, this is of the form: +// /api/v1/namespaces/foo/pods/pod0 for v1. 
+func (g TestGroup) ResourcePath(resource, namespace, name string) string { + return g.ResourcePathWithPrefix("", resource, namespace, name) +} + +// SubResourcePath returns the appropriate path for the given resource, namespace, +// name and subresource. +func (g TestGroup) SubResourcePath(resource, namespace, name, sub string) string { + path := g.ResourcePathWithPrefix("", resource, namespace, name) + if sub != "" { + path = path + "/" + sub + } + + return path +} diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index 33774e0d8d0..4e0bdb3b486 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -4606,7 +4606,7 @@ const ( // Enable TTY for remote command execution ExecTTYParam = "tty" // Command to run for remote command execution - ExecCommandParamm = "command" + ExecCommandParam = "command" // Name of header that specifies stream type StreamType = "streamType" diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index 709cfb456b8..06b8366c983 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -10,6 +10,10 @@ "ImportPath": "bitbucket.org/ww/goautoneg", "Rev": "75cd24fc2f2c2a2088577d12123ddee5f54e0675" }, + { + "ImportPath": "github.com/NYTimes/gziphandler", + "Rev": "56545f4a5d46df9a6648819d1664c3a03a13ffdb" + }, { "ImportPath": "github.com/PuerkitoBio/purell", "Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4" @@ -80,7 +84,7 @@ }, { "ImportPath": "github.com/davecgh/go-spew/spew", - "Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d" + "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" }, { "ImportPath": "github.com/elazarl/go-bindata-assetfs", @@ -106,10 +110,6 @@ "ImportPath": "github.com/ghodss/yaml", "Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee" }, - { - "ImportPath": 
"github.com/go-openapi/analysis", - "Rev": "b44dc874b601d9e4e2f6e19140e794ba24bead3b" - }, { "ImportPath": "github.com/go-openapi/jsonpointer", "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" @@ -118,10 +118,6 @@ "ImportPath": "github.com/go-openapi/jsonreference", "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" }, - { - "ImportPath": "github.com/go-openapi/loads", - "Rev": "18441dfa706d924a39a030ee2c3b1d8d81917b38" - }, { "ImportPath": "github.com/go-openapi/spec", "Rev": "6aced65f8501fe1217321abf0749d354824ba2ff" @@ -238,6 +234,10 @@ "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil", "Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a" }, + { + "ImportPath": "github.com/mxk/go-flowrate/flowrate", + "Rev": "cca7078d478f8520f85629ad7c68962d31ed7682" + }, { "ImportPath": "github.com/pborman/uuid", "Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4" @@ -288,11 +288,11 @@ }, { "ImportPath": "github.com/stretchr/testify/assert", - "Rev": "e3a8ff8ce36581f87a15341206f205b1da467059" + "Rev": "f6abca593680b2315d2075e0f5e2a9751e3f431a" }, { "ImportPath": "github.com/stretchr/testify/require", - "Rev": "e3a8ff8ce36581f87a15341206f205b1da467059" + "Rev": "f6abca593680b2315d2075e0f5e2a9751e3f431a" }, { "ImportPath": "github.com/ugorji/go/codec", diff --git a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/README.md b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/README.md index 4468e616ba7..3ac247f4a1f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/README.md +++ b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/README.md @@ -43,4 +43,10 @@ type User struct { Name string `json:"name"` Password string `json:"password"` } -``` \ No newline at end of file +``` + +## Cleanup + +Successfully running this program will clean the created artifacts. 
If you terminate the program without completing, you can clean up the created CustomResourceDefinition with: + + kubectl delete crd examples.cr.client-go.k8s.io diff --git a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/apis/cr/v1/BUILD b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/apis/cr/v1/BUILD index f00e0752f94..d8595828255 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/apis/cr/v1/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/apis/cr/v1/BUILD @@ -25,12 +25,15 @@ go_test( go_library( name = "go_default_library", srcs = [ + "doc.go", "register.go", "types.go", + "zz_generated.deepcopy.go", ], tags = ["automanaged"], deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/pkg/util/uuid/doc.go b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/apis/cr/v1/doc.go similarity index 79% rename from pkg/util/uuid/doc.go rename to staging/src/k8s.io/apiextensions-apiserver/examples/client-go/apis/cr/v1/doc.go index 51afc2e6e15..be4d74fc2a1 100644 --- a/pkg/util/uuid/doc.go +++ b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/apis/cr/v1/doc.go @@ -14,6 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// uuid contains uuid generation helpers. -// WARNING: this will go away when all godeps which vendor us are ported to the new package. 
-package uuid +// +k8s:deepcopy-gen=package +package v1 diff --git a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/apis/cr/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/apis/cr/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..7f13ff474b9 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/apis/cr/v1/zz_generated.deepcopy.go @@ -0,0 +1,93 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + reflect "reflect" +) + +// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. +func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { + return []conversion.GeneratedDeepCopyFunc{ + {Fn: DeepCopy_v1_Example, InType: reflect.TypeOf(&Example{})}, + {Fn: DeepCopy_v1_ExampleList, InType: reflect.TypeOf(&ExampleList{})}, + {Fn: DeepCopy_v1_ExampleSpec, InType: reflect.TypeOf(&ExampleSpec{})}, + {Fn: DeepCopy_v1_ExampleStatus, InType: reflect.TypeOf(&ExampleStatus{})}, + } +} + +// DeepCopy_v1_Example is an autogenerated deepcopy function. 
+func DeepCopy_v1_Example(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Example) + out := out.(*Example) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + return nil + } +} + +// DeepCopy_v1_ExampleList is an autogenerated deepcopy function. +func DeepCopy_v1_ExampleList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExampleList) + out := out.(*ExampleList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Example, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*Example) + } + } + } + return nil + } +} + +// DeepCopy_v1_ExampleSpec is an autogenerated deepcopy function. +func DeepCopy_v1_ExampleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExampleSpec) + out := out.(*ExampleSpec) + *out = *in + return nil + } +} + +// DeepCopy_v1_ExampleStatus is an autogenerated deepcopy function. 
+func DeepCopy_v1_ExampleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExampleStatus) + out := out.(*ExampleStatus) + *out = *in + return nil + } +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/main.go b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/main.go index 9ed7c2f012b..2d50a83d52b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/main.go +++ b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/main.go @@ -110,7 +110,7 @@ func main() { } fmt.Print("PROCESSED\n") - // Fetch a list of our TPRs + // Fetch a list of our CRs exampleList := crv1.ExampleList{} err = exampleClient.Get().Resource(crv1.ExampleResourcePlural).Do().Into(&exampleList) if err != nil { diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/finalization_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/finalization_test.go index c9cfc9401ff..12634870431 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/finalization_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/finalization_test.go @@ -69,7 +69,8 @@ func TestFinalization(t *testing.T) { require.NoError(t, err) // Removing the finalizers to allow the following delete remove the object. - // This step will fail if previous delete wrongly removed the object. + // This step will fail if previous delete wrongly removed the object. The + // object will be deleted as part of the finalizer update. for { gottenNoxuInstance.SetFinalizers(nil) _, err = noxuResourceClient.Update(gottenNoxuInstance) @@ -83,14 +84,6 @@ func TestFinalization(t *testing.T) { require.NoError(t, err) } - // Now when finalizer is not there it should be possible to actually remove the object from the server. 
- err = noxuResourceClient.Delete(name, &metav1.DeleteOptions{ - Preconditions: &metav1.Preconditions{ - UID: &uid, - }, - }) - require.NoError(t, err) - // Check that the object is actually gone. _, err = noxuResourceClient.Get(name, metav1.GetOptions{}) require.Error(t, err) diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go index def79f6630d..4fe6471873a 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go @@ -389,7 +389,7 @@ func TestEtcdStorage(t *testing.T) { Metadata: Metadata{ Name: "noxus.mygroup.example.com", Namespace: "", - SelfLink: "/apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions/noxus.mygroup.example.com", + SelfLink: "", }, }, }, @@ -414,7 +414,7 @@ func TestEtcdStorage(t *testing.T) { Metadata: Metadata{ Name: "curlets.mygroup.example.com", Namespace: "", - SelfLink: "/apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions/curlets.mygroup.example.com", + SelfLink: "", }, }, }, diff --git a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json index a737f0bf90d..6dbab2d1074 100644 --- a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json +++ b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json @@ -16,7 +16,7 @@ }, { "ImportPath": "github.com/davecgh/go-spew/spew", - "Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d" + "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" }, { "ImportPath": "github.com/docker/spdystream", @@ -106,18 +106,42 @@ "ImportPath": "github.com/mailru/easyjson/jwriter", "Rev": "d5b7844b561a7bc640052f1b935f7b800330d7e0" }, + { + "ImportPath": "github.com/mxk/go-flowrate/flowrate", + "Rev": "cca7078d478f8520f85629ad7c68962d31ed7682" + }, { "ImportPath": "github.com/pborman/uuid", "Rev": 
"ca53cad383cad2479bbba7f7a1a05797ec1386e4" }, + { + "ImportPath": "github.com/pmezard/go-difflib/difflib", + "Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d" + }, { "ImportPath": "github.com/spf13/pflag", "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" }, + { + "ImportPath": "github.com/stretchr/testify/assert", + "Rev": "f6abca593680b2315d2075e0f5e2a9751e3f431a" + }, + { + "ImportPath": "github.com/stretchr/testify/require", + "Rev": "f6abca593680b2315d2075e0f5e2a9751e3f431a" + }, { "ImportPath": "github.com/ugorji/go/codec", "Rev": "ded73eae5db7e7a0ef6f55aace87a2873c5d2b74" }, + { + "ImportPath": "golang.org/x/net/html", + "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" + }, + { + "ImportPath": "golang.org/x/net/html/atom", + "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" + }, { "ImportPath": "golang.org/x/net/http2", "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" @@ -134,6 +158,10 @@ "ImportPath": "golang.org/x/net/lex/httplex", "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" }, + { + "ImportPath": "golang.org/x/net/websocket", + "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" + }, { "ImportPath": "golang.org/x/text/cases", "Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" diff --git a/staging/src/k8s.io/apimachinery/OWNERS b/staging/src/k8s.io/apimachinery/OWNERS index 2719ec92049..7069eeb0b42 100644 --- a/staging/src/k8s.io/apimachinery/OWNERS +++ b/staging/src/k8s.io/apimachinery/OWNERS @@ -18,4 +18,4 @@ reviewers: - gmarek - sttts - ncdc -- timstclair +- tallclair diff --git a/staging/src/k8s.io/apimachinery/pkg/api/errors/OWNERS b/staging/src/k8s.io/apimachinery/pkg/api/errors/OWNERS index e664b2015b8..ff82254f831 100755 --- a/staging/src/k8s.io/apimachinery/pkg/api/errors/OWNERS +++ b/staging/src/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -14,7 +14,7 @@ reviewers: - erictune - saad-ali - janetkuo -- timstclair +- tallclair - eparis - timothysc - dims diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/help.go 
b/staging/src/k8s.io/apimachinery/pkg/api/meta/help.go index 9e0fb152ade..c70b3d2b6c7 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/help.go @@ -67,6 +67,9 @@ func GetItemsPtr(list runtime.Object) (interface{}, error) { // EachListItem invokes fn on each runtime.Object in the list. Any error immediately terminates // the loop. func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { + if unstructured, ok := obj.(runtime.Unstructured); ok { + return unstructured.EachListItem(fn) + } // TODO: Change to an interface call? itemsPtr, err := GetItemsPtr(obj) if err != nil { diff --git a/staging/src/k8s.io/apimachinery/pkg/api/resource/OWNERS b/staging/src/k8s.io/apimachinery/pkg/api/resource/OWNERS index b905e57f0f6..342ff29145b 100755 --- a/staging/src/k8s.io/apimachinery/pkg/api/resource/OWNERS +++ b/staging/src/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -7,7 +7,7 @@ reviewers: - mikedanese - saad-ali - janetkuo -- timstclair +- tallclair - eparis - timothysc - jbeda diff --git a/staging/src/k8s.io/apimachinery/pkg/api/validation/objectmeta_test.go b/staging/src/k8s.io/apimachinery/pkg/api/validation/objectmeta_test.go index a8041d7fec1..9ec73b040c8 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/validation/objectmeta_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/validation/objectmeta_test.go @@ -102,8 +102,8 @@ func TestValidateObjectMetaOwnerReferences(t *testing.T) { description: "simple success - third party extension.", ownerReferences: []metav1.OwnerReference{ { - APIVersion: "thirdpartyVersion", - Kind: "thirdpartyKind", + APIVersion: "customresourceVersion", + Kind: "customresourceKind", Name: "name", UID: "1", }, @@ -128,29 +128,29 @@ func TestValidateObjectMetaOwnerReferences(t *testing.T) { description: "simple controller ref success - one reference with Controller set", ownerReferences: []metav1.OwnerReference{ { - APIVersion: "thirdpartyVersion", - 
Kind: "thirdpartyKind", + APIVersion: "customresourceVersion", + Kind: "customresourceKind", Name: "name", UID: "1", Controller: &falseVar, }, { - APIVersion: "thirdpartyVersion", - Kind: "thirdpartyKind", + APIVersion: "customresourceVersion", + Kind: "customresourceKind", Name: "name", UID: "2", Controller: &trueVar, }, { - APIVersion: "thirdpartyVersion", - Kind: "thirdpartyKind", + APIVersion: "customresourceVersion", + Kind: "customresourceKind", Name: "name", UID: "3", Controller: &falseVar, }, { - APIVersion: "thirdpartyVersion", - Kind: "thirdpartyKind", + APIVersion: "customresourceVersion", + Kind: "customresourceKind", Name: "name", UID: "4", }, @@ -162,29 +162,29 @@ func TestValidateObjectMetaOwnerReferences(t *testing.T) { description: "simple controller ref failure - two references with Controller set", ownerReferences: []metav1.OwnerReference{ { - APIVersion: "thirdpartyVersion", - Kind: "thirdpartyKind", + APIVersion: "customresourceVersion", + Kind: "customresourceKind", Name: "name", UID: "1", Controller: &falseVar, }, { - APIVersion: "thirdpartyVersion", - Kind: "thirdpartyKind", + APIVersion: "customresourceVersion", + Kind: "customresourceKind", Name: "name", UID: "2", Controller: &trueVar, }, { - APIVersion: "thirdpartyVersion", - Kind: "thirdpartyKind", + APIVersion: "customresourceVersion", + Kind: "customresourceKind", Name: "name", UID: "3", Controller: &trueVar, }, { - APIVersion: "thirdpartyVersion", - Kind: "thirdpartyKind", + APIVersion: "customresourceVersion", + Kind: "customresourceKind", Name: "name", UID: "4", }, diff --git a/staging/src/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go b/staging/src/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go index f2e32c88cdf..5150db01659 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go +++ b/staging/src/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go @@ -42,10 +42,6 @@ type APIRegistrationManager struct { // 
registeredGroupVersions stores all API group versions for which RegisterGroup is called. registeredVersions map[schema.GroupVersion]struct{} - // thirdPartyGroupVersions are API versions which are dynamically - // registered (and unregistered) via API calls to the apiserver - thirdPartyGroupVersions []schema.GroupVersion - // enabledVersions represents all enabled API versions. It should be a // subset of registeredVersions. Please call EnableVersions() to add // enabled versions. @@ -66,11 +62,10 @@ type APIRegistrationManager struct { // wish to test. func NewAPIRegistrationManager(kubeAPIVersions string) (*APIRegistrationManager, error) { m := &APIRegistrationManager{ - registeredVersions: map[schema.GroupVersion]struct{}{}, - thirdPartyGroupVersions: []schema.GroupVersion{}, - enabledVersions: map[schema.GroupVersion]struct{}{}, - groupMetaMap: map[string]*apimachinery.GroupMeta{}, - envRequestedVersions: []schema.GroupVersion{}, + registeredVersions: map[schema.GroupVersion]struct{}{}, + enabledVersions: map[schema.GroupVersion]struct{}{}, + groupMetaMap: map[string]*apimachinery.GroupMeta{}, + envRequestedVersions: []schema.GroupVersion{}, } if len(kubeAPIVersions) != 0 { @@ -211,41 +206,6 @@ func (m *APIRegistrationManager) RegisteredGroupVersions() []schema.GroupVersion return ret } -// IsThirdPartyAPIGroupVersion returns true if the api version is a user-registered group/version. -func (m *APIRegistrationManager) IsThirdPartyAPIGroupVersion(gv schema.GroupVersion) bool { - for ix := range m.thirdPartyGroupVersions { - if m.thirdPartyGroupVersions[ix] == gv { - return true - } - } - return false -} - -// AddThirdPartyAPIGroupVersions sets the list of third party versions, -// registers them in the API machinery and enables them. -// Skips GroupVersions that are already registered. -// Returns the list of GroupVersions that were skipped. 
-func (m *APIRegistrationManager) AddThirdPartyAPIGroupVersions(gvs ...schema.GroupVersion) []schema.GroupVersion { - filteredGVs := []schema.GroupVersion{} - skippedGVs := []schema.GroupVersion{} - for ix := range gvs { - if !m.IsRegisteredVersion(gvs[ix]) { - filteredGVs = append(filteredGVs, gvs[ix]) - } else { - glog.V(3).Infof("Skipping %s, because its already registered", gvs[ix].String()) - skippedGVs = append(skippedGVs, gvs[ix]) - } - } - if len(filteredGVs) == 0 { - return skippedGVs - } - m.RegisterVersions(filteredGVs) - m.EnableVersions(filteredGVs...) - m.thirdPartyGroupVersions = append(m.thirdPartyGroupVersions, filteredGVs...) - - return skippedGVs -} - // InterfacesFor is a union meta.VersionInterfacesFunc func for all registered types func (m *APIRegistrationManager) InterfacesFor(version schema.GroupVersion) (*meta.VersionInterfaces, error) { groupMeta, err := m.Group(version.Group) diff --git a/staging/src/k8s.io/apimachinery/pkg/apimachinery/registered/registered_test.go b/staging/src/k8s.io/apimachinery/pkg/apimachinery/registered/registered_test.go index 37d54d27f79..58fc0173e9c 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apimachinery/registered/registered_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/apimachinery/registered/registered_test.go @@ -23,83 +23,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -func TestAddThirdPartyVersionsBasic(t *testing.T) { - m, err := NewAPIRegistrationManager("") - if err != nil { - t.Fatalf("Unexpected failure to make a manager: %v", err) - } - - registered := []schema.GroupVersion{ - { - Group: "", - Version: "v1", - }, - } - skipped := registered - thirdParty := []schema.GroupVersion{ - { - Group: "company.com", - Version: "v1", - }, - { - Group: "company.com", - Version: "v2", - }, - } - gvs := append(registered, thirdParty...) - - m.RegisterVersions(registered) - wasSkipped := m.AddThirdPartyAPIGroupVersions(gvs...) 
- if len(wasSkipped) != len(skipped) { - t.Errorf("Expected %v, found %v", skipped, wasSkipped) - } - for ix := range wasSkipped { - found := false - for _, gv := range skipped { - if gv.String() == wasSkipped[ix].String() { - found = true - break - } - } - if !found { - t.Errorf("Couldn't find %v in %v", wasSkipped[ix], skipped) - } - } - for _, gv := range thirdParty { - if !m.IsThirdPartyAPIGroupVersion(gv) { - t.Errorf("Expected %v to be third party.", gv) - } - } -} - -func TestAddThirdPartyVersionsMultiple(t *testing.T) { - thirdParty := []schema.GroupVersion{ - { - Group: "company.com", - Version: "v1", - }, - { - Group: "company.com", - Version: "v2", - }, - } - m, err := NewAPIRegistrationManager("") - if err != nil { - t.Fatalf("Unexpected failure to make a manager: %v", err) - } - for _, gv := range thirdParty { - wasSkipped := m.AddThirdPartyAPIGroupVersions(gv) - if len(wasSkipped) != 0 { - t.Errorf("Expected length 0, found %v", wasSkipped) - } - } - for _, gv := range thirdParty { - if !m.IsThirdPartyAPIGroupVersion(gv) { - t.Errorf("Expected %v to be third party.", gv) - } - } -} - func TestAllPreferredGroupVersions(t *testing.T) { testCases := []struct { groupMetas []apimachinery.GroupMeta diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD index 2ce980fa757..6e91c751e16 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD @@ -11,6 +11,7 @@ load( go_test( name = "go_default_test", srcs = [ + "controller_ref_test.go", "duration_test.go", "group_version_test.go", "helpers_test.go", @@ -25,12 +26,14 @@ go_test( "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/ugorji/go/codec:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], ) go_library( name = "go_default_library", srcs = 
[ + "controller_ref.go", "conversion.go", "doc.go", "duration.go", diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go new file mode 100644 index 00000000000..042cd5b9c55 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// IsControlledBy checks if the object has a controllerRef set to the given owner +func IsControlledBy(obj Object, owner Object) bool { + ref := GetControllerOf(obj) + if ref == nil { + return false + } + return ref.UID == owner.GetUID() +} + +// GetControllerOf returns a pointer to a copy of the controllerRef if controllee has a controller +func GetControllerOf(controllee Object) *OwnerReference { + for _, ref := range controllee.GetOwnerReferences() { + if ref.Controller != nil && *ref.Controller { + return &ref + } + } + return nil +} + +// NewControllerRef creates an OwnerReference pointing to the given owner. 
+func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference { + blockOwnerDeletion := true + isController := true + return &OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + Name: owner.GetName(), + UID: owner.GetUID(), + BlockOwnerDeletion: &blockOwnerDeletion, + Controller: &isController, + } +} diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref_test.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref_test.go new file mode 100644 index 00000000000..add764a331f --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref_test.go @@ -0,0 +1,133 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "testing" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type metaObj struct { + ObjectMeta + TypeMeta +} + +func TestNewControllerRef(t *testing.T) { + gvk := schema.GroupVersionKind{ + Group: "group", + Version: "v1", + Kind: "Kind", + } + obj1 := &metaObj{ + ObjectMeta: ObjectMeta{ + Name: "name", + UID: "uid1", + }, + } + controllerRef := NewControllerRef(obj1, gvk) + if controllerRef.UID != obj1.UID { + t.Errorf("Incorrect UID: %s", controllerRef.UID) + } + if controllerRef.Controller == nil || *controllerRef.Controller != true { + t.Error("Controller must be set to true") + } + if controllerRef.BlockOwnerDeletion == nil || *controllerRef.BlockOwnerDeletion != true { + t.Error("BlockOwnerDeletion must be set to true") + } + if controllerRef.APIVersion == "" || + controllerRef.Kind == "" || + controllerRef.Name == "" { + t.Errorf("All controllerRef fields must be set: %v", controllerRef) + } +} + +func TestGetControllerOf(t *testing.T) { + gvk := schema.GroupVersionKind{ + Group: "group", + Version: "v1", + Kind: "Kind", + } + obj1 := &metaObj{ + ObjectMeta: ObjectMeta{ + UID: "uid1", + Name: "name1", + }, + } + controllerRef := NewControllerRef(obj1, gvk) + var falseRef = false + obj2 := &metaObj{ + ObjectMeta: ObjectMeta{ + UID: "uid2", + Name: "name1", + OwnerReferences: []OwnerReference{ + { + Name: "owner1", + Controller: &falseRef, + }, + *controllerRef, + { + Name: "owner2", + Controller: &falseRef, + }, + }, + }, + } + + if GetControllerOf(obj1) != nil { + t.Error("GetControllerOf must return null") + } + c := GetControllerOf(obj2) + if c.Name != controllerRef.Name || c.UID != controllerRef.UID { + t.Errorf("Incorrect result of GetControllerOf: %v", c) + } +} + +func TestIsControlledBy(t *testing.T) { + gvk := schema.GroupVersionKind{ + Group: "group", + Version: "v1", + Kind: "Kind", + } + obj1 := &metaObj{ + ObjectMeta: ObjectMeta{ + UID: "uid1", + }, + } + obj2 := &metaObj{ + ObjectMeta: ObjectMeta{ + UID: "uid2", 
+ OwnerReferences: []OwnerReference{ + *NewControllerRef(obj1, gvk), + }, + }, + } + obj3 := &metaObj{ + ObjectMeta: ObjectMeta{ + UID: "uid3", + OwnerReferences: []OwnerReference{ + *NewControllerRef(obj2, gvk), + }, + }, + } + if !IsControlledBy(obj2, obj1) || !IsControlledBy(obj3, obj2) { + t.Error("Incorrect IsControlledBy result: false") + } + if IsControlledBy(obj3, obj1) { + t.Error("Incorrect IsControlledBy result: true") + } +} diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index fed687e9bd3..5d1aeb0bcb9 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -69,6 +69,39 @@ func (obj *Unstructured) IsList() bool { } func (obj *UnstructuredList) IsList() bool { return true } +func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error { + if obj.Object == nil { + return fmt.Errorf("content is not a list") + } + field, ok := obj.Object["items"] + if !ok { + return fmt.Errorf("content is not a list") + } + items, ok := field.([]interface{}) + if !ok { + return nil + } + for _, item := range items { + child, ok := item.(map[string]interface{}) + if !ok { + return fmt.Errorf("items member is not an object") + } + if err := fn(&Unstructured{Object: child}); err != nil { + return err + } + } + return nil +} + +func (obj *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { + for i := range obj.Items { + if err := fn(&obj.Items[i]); err != nil { + return err + } + } + return nil +} + func (obj *Unstructured) UnstructuredContent() map[string]interface{} { if obj.Object == nil { obj.Object = make(map[string]interface{}) @@ -136,10 +169,15 @@ func getNestedInt64(obj map[string]interface{}, fields ...string) int64 { } func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) 
*int64 { - if str, ok := getNestedField(obj, fields...).(*int64); ok { - return str + nested := getNestedField(obj, fields...) + switch n := nested.(type) { + case int64: + return &n + case *int64: + return n + default: + return nil } - return nil } func getNestedSlice(obj map[string]interface{}, fields ...string) []string { @@ -470,12 +508,15 @@ func (u *Unstructured) GetInitializers() *metav1.Initializers { } func (u *Unstructured) SetInitializers(initializers *metav1.Initializers) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } if initializers == nil { setNestedField(u.Object, nil, "metadata", "initializers") return } - out := make(map[string]interface{}) - if err := converter.ToUnstructured(initializers, &out); err != nil { + out, err := converter.ToUnstructured(initializers) + if err != nil { utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) } setNestedField(u.Object, out, "metadata", "initializers") diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/BUILD b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/BUILD new file mode 100644 index 00000000000..6e2604388ad --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/BUILD @@ -0,0 +1,25 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + "types.go", + "zz_generated.deepcopy.go", + ], + tags = ["automanaged"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) diff --git a/pkg/util/rand/doc.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/doc.go similarity index 65% rename from 
pkg/util/rand/doc.go rename to staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/doc.go index 7ba225434da..f8c455eecb0 100644 --- a/pkg/util/rand/doc.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/doc.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package rand needed for godep. -// TODO genericapiserver remove this empty package. Godep fails without this because heapster relies -// on this package. This will allow us to start splitting packages, but will force -// heapster to update on their next kube rebase. -package rand +// +k8s:deepcopy-gen=package,register +// +groupName=testapigroup.apimachinery.k8s.io +// +// package testapigroup contains an testapigroup API used to demonstrate how to create api groups. Moreover, this is +// used within tests. +package testapigroup // import "k8s.io/apimachinery/pkg/apis/testapigroup" diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/fuzzer/BUILD b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/fuzzer/BUILD new file mode 100644 index 00000000000..c97f83a5b75 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/fuzzer/BUILD @@ -0,0 +1,22 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["fuzzer.go"], + tags = ["automanaged"], + deps = [ + "//vendor/github.com/google/gofuzz:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/testing:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/testapigroup:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/testapigroup/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + ], +) diff --git 
a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/fuzzer/fuzzer.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/fuzzer/fuzzer.go new file mode 100644 index 00000000000..573045eebdf --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/fuzzer/fuzzer.go @@ -0,0 +1,99 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fuzzer + +import ( + "github.com/google/gofuzz" + + apitesting "k8s.io/apimachinery/pkg/api/testing" + "k8s.io/apimachinery/pkg/apis/testapigroup" + "k8s.io/apimachinery/pkg/apis/testapigroup/v1" + "k8s.io/apimachinery/pkg/runtime" + runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// overrideGenericFuncs override some generic fuzzer funcs from k8s.io/apimachinery in order to have more realistic +// values in a Kubernetes context. 
+func overrideGenericFuncs(t apitesting.TestingCommon, codecs runtimeserializer.CodecFactory) []interface{} { + return []interface{}{ + func(j *runtime.Object, c fuzz.Continue) { + // TODO: uncomment when round trip starts from a versioned object + if true { //c.RandBool() { + *j = &runtime.Unknown{ + // We do not set TypeMeta here because it is not carried through a round trip + Raw: []byte(`{"apiVersion":"unknown.group/unknown","kind":"Something","someKey":"someValue"}`), + ContentType: runtime.ContentTypeJSON, + } + } else { + types := []runtime.Object{&testapigroup.Carp{}} + t := types[c.Rand.Intn(len(types))] + c.Fuzz(t) + *j = t + } + }, + func(r *runtime.RawExtension, c fuzz.Continue) { + // Pick an arbitrary type and fuzz it + types := []runtime.Object{&testapigroup.Carp{}} + obj := types[c.Rand.Intn(len(types))] + c.Fuzz(obj) + + // Convert the object to raw bytes + bytes, err := runtime.Encode(apitesting.TestCodec(codecs, v1.SchemeGroupVersion), obj) + if err != nil { + t.Errorf("Failed to encode object: %v", err) + return + } + + // Set the bytes field on the RawExtension + r.Raw = bytes + }, + } +} + +func testapigroupFuncs(t apitesting.TestingCommon) []interface{} { + return []interface{}{ + func(s *testapigroup.CarpSpec, c fuzz.Continue) { + c.FuzzNoCustom(s) + // has a default value + ttl := int64(30) + if c.RandBool() { + ttl = int64(c.Uint32()) + } + s.TerminationGracePeriodSeconds = &ttl + + if s.SchedulerName == "" { + s.SchedulerName = "default-scheduler" + } + }, + func(j *testapigroup.CarpPhase, c fuzz.Continue) { + statuses := []testapigroup.CarpPhase{"Pending", "Running", "Succeeded", "Failed", "Unknown"} + *j = statuses[c.Rand.Intn(len(statuses))] + }, + func(rp *testapigroup.RestartPolicy, c fuzz.Continue) { + policies := []testapigroup.RestartPolicy{"Always", "Never", "OnFailure"} + *rp = policies[c.Rand.Intn(len(policies))] + }, + } +} + +func Funcs(t apitesting.TestingCommon, codecs runtimeserializer.CodecFactory) []interface{} { + 
return apitesting.MergeFuzzerFuncs(t, + apitesting.GenericFuzzerFuncs(t, codecs), + overrideGenericFuncs(t, codecs), + testapigroupFuncs(t), + ) +} diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/install/BUILD b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/install/BUILD new file mode 100644 index 00000000000..2f00d9507cf --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/install/BUILD @@ -0,0 +1,21 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["install.go"], + tags = ["automanaged"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/testapigroup:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/testapigroup/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/install/install.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/install/install.go new file mode 100644 index 00000000000..e6e3fc43ff0 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/install/install.go @@ -0,0 +1,44 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package install installs the certificates API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/apis/testapigroup" + "k8s.io/apimachinery/pkg/apis/testapigroup/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: testapigroup.GroupName, + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/apimachinery/pkg/apis/testapigroup", + AddInternalObjectsToScheme: testapigroup.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1.SchemeGroupVersion.Version: v1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/staging/src/k8s.io/client-go/examples/third-party-resources-deprecated/apis/tpr/v1/register.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/register.go similarity index 66% rename from staging/src/k8s.io/client-go/examples/third-party-resources-deprecated/apis/tpr/v1/register.go rename to staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/register.go index 201310fef46..cbf628fc780 100644 --- a/staging/src/k8s.io/client-go/examples/third-party-resources-deprecated/apis/tpr/v1/register.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/register.go @@ -14,10 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1 +package testapigroup import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -27,23 +26,26 @@ var ( AddToScheme = SchemeBuilder.AddToScheme ) -// GroupName is the group name use in this package. -const GroupName = "tpr.client-go.k8s.io" +// GroupName is the group name use in this package +const GroupName = "testapigroup.apimachinery.k8s.io" -// SchemeGroupVersion is the group version used to register these objects. -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} -// Resource takes an unqualified resource and returns a Group-qualified GroupResource. +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } -// addKnownTypes adds the set of types defined in this package to the supplied scheme. +// Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &Example{}, - &ExampleList{}, + &Carp{}, ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/types.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/types.go new file mode 100644 index 00000000000..3a63e899ec0 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/types.go @@ -0,0 +1,134 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testapigroup + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type ( + ConditionStatus string + CarpConditionType string + CarpPhase string + RestartPolicy string +) + +// Carp is a collection of containers, used as either input (create, update) or as output (list, get). +type Carp struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a carp. + // +optional + Spec CarpSpec + + // Status represents the current information about a carp. This data may not be up + // to date. + // +optional + Status CarpStatus +} + +// CarpStatus represents information about the status of a carp. Status may trail the actual +// state of a system. +type CarpStatus struct { + // +optional + Phase CarpPhase + // +optional + Conditions []CarpCondition + // A human readable message indicating details about why the carp is in this state. + // +optional + Message string + // A brief CamelCase message indicating details about why the carp is in this state. e.g. 'OutOfDisk' + // +optional + Reason string + + // +optional + HostIP string + // +optional + CarpIP string + + // Date and time at which the object was acknowledged by the Kubelet. + // This is before the Kubelet pulled the container image(s) for the carp. 
+ // +optional + StartTime *metav1.Time +} + +type CarpCondition struct { + Type CarpConditionType + Status ConditionStatus + // +optional + LastProbeTime metav1.Time + // +optional + LastTransitionTime metav1.Time + // +optional + Reason string + // +optional + Message string +} + +// CarpSpec is a description of a carp +type CarpSpec struct { + // +optional + RestartPolicy RestartPolicy + // Optional duration in seconds the carp needs to terminate gracefully. May be decreased in delete request. + // Value must be non-negative integer. The value zero indicates delete immediately. + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the carp are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // +optional + TerminationGracePeriodSeconds *int64 + // Optional duration in seconds relative to the StartTime that the carp may be active on a node + // before the system actively tries to terminate the carp; value must be positive integer + // +optional + ActiveDeadlineSeconds *int64 + // NodeSelector is a selector which must be true for the carp to fit on a node + // +optional + NodeSelector map[string]string + + // ServiceAccountName is the name of the ServiceAccount to use to run this carp + // The carp will be allowed to use secrets referenced by the ServiceAccount + ServiceAccountName string + + // NodeName is a request to schedule this carp onto a specific node. If it is non-empty, + // the scheduler simply schedules this carp onto that node, assuming that it fits resource + // requirements. + // +optional + NodeName string + // Specifies the hostname of the Carp. + // If not specified, the carp's hostname will be set to a system-defined value. 
+ // +optional + Hostname string + // If specified, the fully qualified Carp hostname will be "...svc.". + // If not specified, the carp will not have a domainname at all. + // +optional + Subdomain string + // If specified, the carp will be dispatched by specified scheduler. + // If not specified, the carp will be dispatched by default scheduler. + // +optional + SchedulerName string +} + +// CarpList is a list of Carps. +type CarpList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Carp +} diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/BUILD b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/BUILD new file mode 100644 index 00000000000..350868e813e --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/BUILD @@ -0,0 +1,37 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "conversion.go", + "defaults.go", + "doc.go", + "generated.pb.go", + "register.go", + "types.generated.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.conversion.go", + "zz_generated.deepcopy.go", + "zz_generated.defaults.go", + ], + tags = ["automanaged"], + deps = [ + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", + "//vendor/github.com/ugorji/go/codec:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/testapigroup:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + ], +) diff --git a/pkg/apimachinery/tests/doc.go 
b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/conversion.go similarity index 71% rename from pkg/apimachinery/tests/doc.go rename to staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/conversion.go index 8d5b45029cb..3e8448f4c56 100644 --- a/pkg/apimachinery/tests/doc.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/conversion.go @@ -14,7 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package runs tests against the apimachinery which require a Scheme -// TODO Refactor the base types into the machinery and move these tests back. -// See https://github.com/kubernetes/kubernetes/issues/39611 -package tests +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions here. Currently there are none. + return nil +} diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/defaults.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/defaults.go new file mode 100644 index 00000000000..436ccde2964 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/defaults.go @@ -0,0 +1,26 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + // return RegisterDefaults(scheme) + return nil +} diff --git a/pkg/util/net/doc.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/doc.go similarity index 65% rename from pkg/util/net/doc.go rename to staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/doc.go index 61bf9e38aa4..92085894e3c 100644 --- a/pkg/util/net/doc.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/doc.go @@ -14,8 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package net only exists until heapster rebases -// TODO genericapiserver remove this empty package. Godep fails without this because heapster relies -// on this package. This will allow us to start splitting packages, but will force -// heapster to update on their next kube rebase. -package net +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/testapigroup +// +k8s:openapi-gen=false +// +k8s:defaulter-gen=TypeMeta + +// +groupName=testapigroup.apimachinery.k8s.io +package v1 // import "k8s.io/apimachinery/pkg/apis/testapigroup/v1" diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/generated.pb.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/generated.pb.go new file mode 100644 index 00000000000..c3c1751b2cf --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/generated.pb.go @@ -0,0 +1,2009 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/testapigroup/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/testapigroup/v1/generated.proto + + It has these top-level messages: + Carp + CarpCondition + CarpList + CarpSpec + CarpStatus +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Carp) Reset() { *m = Carp{} } +func (*Carp) ProtoMessage() {} +func (*Carp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *CarpCondition) Reset() { *m = CarpCondition{} } +func (*CarpCondition) ProtoMessage() {} +func (*CarpCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *CarpList) Reset() { *m = CarpList{} } +func (*CarpList) ProtoMessage() {} +func (*CarpList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *CarpSpec) Reset() { *m = CarpSpec{} } +func (*CarpSpec) ProtoMessage() {} +func (*CarpSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *CarpStatus) Reset() { *m = CarpStatus{} } +func (*CarpStatus) ProtoMessage() {} +func (*CarpStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func init() { + proto.RegisterType((*Carp)(nil), "k8s.io.apimachinery.pkg.apis.testapigroup.v1.Carp") + proto.RegisterType((*CarpCondition)(nil), "k8s.io.apimachinery.pkg.apis.testapigroup.v1.CarpCondition") + proto.RegisterType((*CarpList)(nil), "k8s.io.apimachinery.pkg.apis.testapigroup.v1.CarpList") + proto.RegisterType((*CarpSpec)(nil), "k8s.io.apimachinery.pkg.apis.testapigroup.v1.CarpSpec") + proto.RegisterType((*CarpStatus)(nil), "k8s.io.apimachinery.pkg.apis.testapigroup.v1.CarpStatus") +} +func (m *Carp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Carp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *CarpCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CarpCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) + n4, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n5, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *CarpList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CarpList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + if 
len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CarpSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CarpSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) + i += copy(dAtA[i:], m.RestartPolicy) + if m.TerminationGracePeriodSeconds != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds)) + } + if m.ActiveDeadlineSeconds != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + } + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for _, k := range keysForNodeSelector { + dAtA[i] = 0x3a + i++ + v := m.NodeSelector[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i += copy(dAtA[i:], m.ServiceAccountName) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServiceAccount))) + i += copy(dAtA[i:], m.DeprecatedServiceAccount) + dAtA[i] = 0x52 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + dAtA[i] = 0x58 + i++ + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x60 + i++ + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x68 + i++ + if m.HostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i += copy(dAtA[i:], m.Hostname) + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) + i += copy(dAtA[i:], m.Subdomain) + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) + i += copy(dAtA[i:], m.SchedulerName) + return i, nil +} + +func (m *CarpStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CarpStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i += copy(dAtA[i:], m.HostIP) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CarpIP))) + i += copy(dAtA[i:], m.CarpIP) + if m.StartTime != nil { + dAtA[i] = 0x3a + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) + n7, err := m.StartTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Carp) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CarpCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CarpList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CarpSpec) Size() (n int) { + var l int + _ = l + l = len(m.RestartPolicy) + n 
+= 1 + l + sovGenerated(uint64(l)) + if m.TerminationGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeprecatedServiceAccount) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + n += 2 + l = len(m.Hostname) + n += 2 + l + sovGenerated(uint64(l)) + l = len(m.Subdomain) + n += 2 + l + sovGenerated(uint64(l)) + l = len(m.SchedulerName) + n += 2 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CarpStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CarpIP) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Carp) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Carp{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CarpSpec", "CarpSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CarpStatus", "CarpStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CarpCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CarpCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *CarpList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CarpList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Carp", "Carp", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CarpSpec) String() string { + if this == nil { + return "nil" + } + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := 
strings.Join([]string{`&CarpSpec{`, + `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, + `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `DeprecatedServiceAccount:` + fmt.Sprintf("%v", this.DeprecatedServiceAccount) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, + `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, + `}`, + }, "") + return s +} +func (this *CarpStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CarpStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "CarpCondition", "CarpCondition", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `CarpIP:` + fmt.Sprintf("%v", this.CarpIP) + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Carp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Carp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Carp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CarpCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CarpCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CarpCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = CarpConditionType(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CarpList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: CarpList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CarpList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Carp{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CarpSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CarpSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CarpSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RestartPolicy = RestartPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationGracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TerminationGracePeriodSeconds = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + case 
7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.NodeSelector[mapkey] = mapvalue + } else { + var mapvalue string + m.NodeSelector[mapkey] = mapvalue + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedServiceAccount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedServiceAccount = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostNetwork = bool(v != 0) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostPID = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostIPC = bool(v != 0) + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subdomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subdomain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchedulerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchedulerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CarpStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CarpStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CarpStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = CarpPhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, CarpCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CarpIP", wireType) + } + var stringLen uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CarpIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTime == nil { + m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/testapigroup/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 1065 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0xb6, 0x2c, 0xcb, 0x92, 0xd6, 0x56, 0x62, 0x6f, 0x62, 0x80, 0x31, 0x10, 0xc9, 0x71, 0x0b, + 0xc3, 0x29, 0x1a, 0xb2, 0x76, 0xd3, 0x22, 0x6d, 0x0f, 0x41, 0x68, 
0x17, 0xb5, 0x0b, 0xff, 0x10, + 0x2b, 0x03, 0x01, 0x8a, 0x1e, 0xba, 0x22, 0x27, 0x32, 0x2b, 0x91, 0x4b, 0xec, 0xae, 0xd4, 0xea, + 0xd6, 0x47, 0x68, 0x1f, 0xa0, 0x4f, 0xd1, 0x43, 0x81, 0x3e, 0x81, 0x8f, 0x39, 0xe6, 0x24, 0xd4, + 0xea, 0x5b, 0xf8, 0x54, 0xec, 0xf2, 0x47, 0xa2, 0xe5, 0x38, 0xf2, 0x6d, 0x77, 0xe6, 0xfb, 0xbe, + 0x19, 0xee, 0x0c, 0x67, 0xd0, 0x71, 0xe7, 0x85, 0x30, 0x7d, 0x66, 0x75, 0x7a, 0x2d, 0xe0, 0x21, + 0x48, 0x10, 0x56, 0x1f, 0x42, 0x8f, 0x71, 0x2b, 0x71, 0xd0, 0xc8, 0x17, 0xc0, 0xfb, 0xc0, 0xad, + 0xa8, 0xd3, 0xd6, 0x37, 0x0b, 0x7e, 0xa5, 0x41, 0xd4, 0x05, 0xab, 0xbf, 0x63, 0xb5, 0x21, 0x04, + 0x4e, 0x25, 0x78, 0x66, 0xc4, 0x99, 0x64, 0xf8, 0xe3, 0x98, 0x65, 0x66, 0x2c, 0x33, 0xea, 0xb4, + 0xf5, 0xcd, 0x4c, 0x58, 0x66, 0x7f, 0x67, 0xfd, 0x59, 0xdb, 0x97, 0xe7, 0xbd, 0x96, 0xe9, 0xb2, + 0xc0, 0x6a, 0xb3, 0x36, 0xb3, 0x34, 0xb9, 0xd5, 0x7b, 0xa3, 0x6f, 0xfa, 0xa2, 0x4f, 0xb1, 0xe8, + 0xfa, 0xf3, 0x71, 0x2a, 0x01, 0x75, 0xcf, 0xfd, 0x10, 0xf8, 0x60, 0x9c, 0x4d, 0x00, 0x92, 0xde, + 0x90, 0xca, 0xba, 0xf5, 0x3e, 0x16, 0xef, 0x85, 0xd2, 0x0f, 0x60, 0x8a, 0xf0, 0xe5, 0x87, 0x08, + 0xc2, 0x3d, 0x87, 0x80, 0x4e, 0xf1, 0x3e, 0x7f, 0x1f, 0xaf, 0x27, 0xfd, 0xae, 0xe5, 0x87, 0x52, + 0x48, 0x7e, 0x9d, 0xb4, 0xf9, 0xc7, 0x3c, 0x2a, 0x3a, 0xcc, 0xc3, 0x3f, 0xa1, 0x8a, 0xfa, 0x00, + 0x8f, 0x4a, 0x6a, 0x14, 0x36, 0x0a, 0xdb, 0x4b, 0xbb, 0x9f, 0x99, 0xe3, 0x37, 0xcc, 0xf4, 0xc6, + 0xcf, 0xa8, 0xd0, 0x66, 0x7f, 0xc7, 0x3c, 0x6d, 0xfd, 0x0c, 0xae, 0x3c, 0x06, 0x49, 0x6d, 0x7c, + 0x31, 0x6c, 0xcc, 0x8d, 0x86, 0x0d, 0x34, 0xb6, 0x91, 0x4c, 0x15, 0x9f, 0xa2, 0x05, 0x11, 0x81, + 0x6b, 0xcc, 0x6b, 0xf5, 0x67, 0xe6, 0x2c, 0x15, 0x32, 0x1d, 0xe6, 0x35, 0x23, 0x70, 0xed, 0xe5, + 0x44, 0x7a, 0x41, 0xdd, 0x88, 0x16, 0xc2, 0xaf, 0xd1, 0xa2, 0x90, 0x54, 0xf6, 0x84, 0x51, 0xd4, + 0x92, 0xd6, 0xec, 0x92, 0x9a, 0x66, 0xdf, 0x4b, 0x44, 0x17, 0xe3, 0x3b, 0x49, 0xe4, 0x36, 0xff, + 0x2a, 0xa2, 0x65, 0x87, 0x79, 0x7b, 0x2c, 0xf4, 0x7c, 0xe9, 0xb3, 0x10, 0x3f, 0x47, 0x0b, 0x72, + 0x10, 
0x81, 0x7e, 0x98, 0xaa, 0xbd, 0x91, 0xe6, 0x72, 0x36, 0x88, 0xe0, 0x6a, 0xd8, 0x58, 0x99, + 0xc4, 0x2a, 0x1b, 0xd1, 0x68, 0xfc, 0x55, 0x96, 0xdf, 0xbc, 0xe6, 0x3d, 0xc9, 0x87, 0xbb, 0x1a, + 0x36, 0xee, 0x67, 0xb4, 0x7c, 0x06, 0xb8, 0x8d, 0x6a, 0x5d, 0x2a, 0xa4, 0xc3, 0x59, 0x0b, 0xce, + 0xfc, 0x00, 0x92, 0x2f, 0xfc, 0x64, 0xb6, 0x92, 0x28, 0x86, 0xbd, 0x96, 0x44, 0xab, 0x1d, 0x4d, + 0x0a, 0x91, 0xbc, 0x2e, 0xee, 0x23, 0xac, 0x0c, 0x67, 0x9c, 0x86, 0x22, 0xce, 0x5f, 0x45, 0x5b, + 0xb8, 0x73, 0xb4, 0xf5, 0x24, 0x1a, 0x3e, 0x9a, 0x52, 0x23, 0x37, 0x44, 0xc0, 0x5b, 0x68, 0x91, + 0x03, 0x15, 0x2c, 0x34, 0x4a, 0xfa, 0x6d, 0xb2, 0x52, 0x10, 0x6d, 0x25, 0x89, 0x17, 0x3f, 0x45, + 0xe5, 0x00, 0x84, 0xa0, 0x6d, 0x30, 0x16, 0x35, 0xf0, 0x7e, 0x02, 0x2c, 0x1f, 0xc7, 0x66, 0x92, + 0xfa, 0x37, 0xff, 0x2e, 0xa0, 0xb2, 0xc3, 0xbc, 0x23, 0x5f, 0x48, 0xfc, 0xe3, 0x54, 0x37, 0x9b, + 0xb3, 0x7d, 0x8c, 0x62, 0xeb, 0x5e, 0x5e, 0x49, 0xe2, 0x54, 0x52, 0xcb, 0x44, 0x27, 0x9f, 0xa0, + 0x92, 0x2f, 0x21, 0x50, 0x75, 0x2d, 0x6e, 0x2f, 0xed, 0x3e, 0x9d, 0xb9, 0xef, 0xec, 0x5a, 0xa2, + 0x5a, 0x3a, 0x54, 0x7c, 0x12, 0xcb, 0x6c, 0xfe, 0x53, 0xd6, 0x99, 0xab, 0xd6, 0xc6, 0x47, 0xa8, + 0xc6, 0x41, 0x48, 0xca, 0xa5, 0xc3, 0xba, 0xbe, 0x3b, 0xd0, 0x95, 0xaf, 0xda, 0x5b, 0x69, 0x35, + 0xc9, 0xa4, 0xf3, 0xea, 0xba, 0x81, 0xe4, 0xc9, 0xb8, 0x8d, 0x1e, 0x4b, 0xe0, 0x81, 0x1f, 0x52, + 0xf5, 0xf2, 0xdf, 0x71, 0xea, 0x82, 0x03, 0xdc, 0x67, 0x5e, 0x13, 0x5c, 0x16, 0x7a, 0x42, 0x57, + 0xba, 0x68, 0x3f, 0x19, 0x0d, 0x1b, 0x8f, 0xcf, 0x6e, 0x03, 0x92, 0xdb, 0x75, 0xf0, 0x29, 0x5a, + 0xa3, 0xae, 0xf4, 0xfb, 0xb0, 0x0f, 0xd4, 0xeb, 0xfa, 0x21, 0xa4, 0x01, 0x4a, 0x3a, 0xc0, 0xa3, + 0xd1, 0xb0, 0xb1, 0xf6, 0xea, 0x26, 0x00, 0xb9, 0x99, 0x87, 0x07, 0x68, 0x39, 0x64, 0x1e, 0x34, + 0xa1, 0x0b, 0xae, 0x64, 0xdc, 0x28, 0xeb, 0xa7, 0x7e, 0x79, 0xa7, 0xa9, 0x61, 0x9e, 0x4c, 0x28, + 0x7c, 0x1b, 0x4a, 0x3e, 0xb0, 0x1f, 0x26, 0xef, 0xb8, 0x3c, 0xe9, 0x22, 0xb9, 0x50, 0xf8, 0x7b, + 0x84, 0x95, 0xb6, 0xef, 0xc2, 0x2b, 0xd7, 
0x65, 0xbd, 0x50, 0x9e, 0xd0, 0x00, 0x8c, 0x8a, 0xae, + 0x43, 0xd6, 0xe7, 0xcd, 0x29, 0x04, 0xb9, 0x81, 0x85, 0x0f, 0xd0, 0xbd, 0xbc, 0xd5, 0xa8, 0xe6, + 0x66, 0x88, 0xb1, 0x0f, 0x11, 0x07, 0x57, 0x0d, 0xe4, 0xbc, 0x22, 0xb9, 0xc6, 0xc3, 0x9f, 0xa2, + 0x8a, 0xca, 0x52, 0xe7, 0x82, 0xb4, 0x46, 0xd6, 0xa2, 0x27, 0x89, 0x9d, 0x64, 0x08, 0xfc, 0x05, + 0x5a, 0x3a, 0x67, 0x42, 0x9e, 0x80, 0xfc, 0x85, 0xf1, 0x8e, 0xb1, 0xb4, 0x51, 0xd8, 0xae, 0xd8, + 0x0f, 0x12, 0xc2, 0xd2, 0xc1, 0xd8, 0x45, 0x26, 0x71, 0xea, 0x77, 0x53, 0x57, 0xe7, 0x70, 0xdf, + 0x58, 0xd6, 0x94, 0xec, 0x77, 0x3b, 0x88, 0xcd, 0x24, 0xf5, 0xa7, 0xd0, 0x43, 0x67, 0xcf, 0xa8, + 0x4d, 0x43, 0x0f, 0x9d, 0x3d, 0x92, 0xfa, 0x55, 0xea, 0xea, 0x18, 0xaa, 0xd4, 0x57, 0xf2, 0xa9, + 0x1f, 0x24, 0x76, 0x92, 0x21, 0xb0, 0x85, 0xaa, 0xa2, 0xd7, 0xf2, 0x58, 0x40, 0xfd, 0xd0, 0x58, + 0xd5, 0xf0, 0xd5, 0x04, 0x5e, 0x6d, 0xa6, 0x0e, 0x32, 0xc6, 0xe0, 0x6f, 0x50, 0x4d, 0x6d, 0x44, + 0xaf, 0xd7, 0x05, 0xae, 0x63, 0x3c, 0xd0, 0xa4, 0x6c, 0x00, 0x36, 0x53, 0xa7, 0x7e, 0xa3, 0x3c, + 0x76, 0xfd, 0x25, 0x5a, 0x9d, 0xea, 0x12, 0xbc, 0x82, 0x8a, 0x1d, 0x18, 0xc4, 0xe3, 0x9e, 0xa8, + 0x23, 0x7e, 0x88, 0x4a, 0x7d, 0xda, 0xed, 0x41, 0x3c, 0xca, 0x49, 0x7c, 0xf9, 0x7a, 0xfe, 0x45, + 0x61, 0xf3, 0xcf, 0x22, 0xaa, 0x66, 0x2b, 0x05, 0x5b, 0xa8, 0x14, 0x9d, 0x53, 0x91, 0xae, 0x8a, + 0x47, 0xe9, 0xff, 0xee, 0x28, 0xe3, 0xd5, 0xb0, 0x51, 0x71, 0x98, 0xa7, 0xcf, 0x24, 0xc6, 0xe1, + 0x37, 0x08, 0xb9, 0xe9, 0x12, 0x48, 0x07, 0xca, 0xee, 0xcc, 0x5d, 0x9e, 0xed, 0x8f, 0xf1, 0xee, + 0xcd, 0x4c, 0x82, 0x4c, 0x28, 0x4f, 0x0e, 0xd2, 0xe2, 0xed, 0x83, 0x74, 0x62, 0x36, 0x2f, 0xdc, + 0x3a, 0x9b, 0xb7, 0xd0, 0x62, 0x5c, 0xe1, 0xeb, 0x33, 0x3c, 0x6e, 0x00, 0x92, 0x78, 0xf1, 0x47, + 0xa8, 0x14, 0x31, 0xef, 0xd0, 0x49, 0x26, 0x78, 0x36, 0x03, 0x1d, 0x65, 0x24, 0xb1, 0x0f, 0xbf, + 0x46, 0x55, 0x3d, 0xb8, 0xf4, 0xfe, 0x29, 0xdf, 0x79, 0xff, 0xd4, 0x74, 0x77, 0xa4, 0x02, 0x64, + 0xac, 0x65, 0x6f, 0x5f, 0x5c, 0xd6, 0xe7, 0xde, 0x5e, 0xd6, 0xe7, 0xde, 0x5d, 
0xd6, 0xe7, 0x7e, + 0x1b, 0xd5, 0x0b, 0x17, 0xa3, 0x7a, 0xe1, 0xed, 0xa8, 0x5e, 0x78, 0x37, 0xaa, 0x17, 0xfe, 0x1d, + 0xd5, 0x0b, 0xbf, 0xff, 0x57, 0x9f, 0xfb, 0x61, 0xbe, 0xbf, 0xf3, 0x7f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x55, 0x69, 0xbd, 0x27, 0x83, 0x0a, 0x00, 0x00, +} diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/generated.proto b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/generated.proto new file mode 100644 index 00000000000..15658347ec9 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/generated.proto @@ -0,0 +1,212 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.apis.testapigroup.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Carp is a collection of containers, used as either input (create, update) or as output (list, get). +message Carp { + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the carp. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional CarpSpec spec = 2; + + // Most recently observed status of the carp. + // This data may not be up to date. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional CarpStatus status = 3; +} + +message CarpCondition { + // Type is the type of the condition. + // Currently only Ready. + // More info: http://kubernetes.io/docs/user-guide/carp-states#carp-conditions + optional string type = 1; + + // Status is the status of the condition. + // Can be True, False, Unknown. + // More info: http://kubernetes.io/docs/user-guide/carp-states#carp-conditions + optional string status = 2; + + // Last time we probed the condition. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // Unique, one-word, CamelCase reason for the condition's last transition. + // +optional + optional string reason = 5; + + // Human-readable message indicating details about last transition. + // +optional + optional string message = 6; +} + +// CarpList is a list of Carps. +message CarpList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of carps. 
+ // More info: http://kubernetes.io/docs/user-guide/carps + repeated Carp items = 2; +} + +// CarpSpec is a description of a carp +message CarpSpec { + // Restart policy for all containers within the carp. + // One of Always, OnFailure, Never. + // Default to Always. + // More info: http://kubernetes.io/docs/user-guide/carp-states#restartpolicy + // +optional + optional string restartPolicy = 3; + + // Optional duration in seconds the carp needs to terminate gracefully. May be decreased in delete request. + // Value must be non-negative integer. The value zero indicates delete immediately. + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the carp are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // Defaults to 30 seconds. + // +optional + optional int64 terminationGracePeriodSeconds = 4; + + // Optional duration in seconds the carp may be active on the node relative to + // StartTime before the system will actively try to mark it failed and kill associated containers. + // Value must be a positive integer. + // +optional + optional int64 activeDeadlineSeconds = 5; + + // NodeSelector is a selector which must be true for the carp to fit on a node. + // Selector which must match a node's labels for the carp to be scheduled on that node. + // More info: http://kubernetes.io/docs/user-guide/node-selection/README + // +optional + map nodeSelector = 7; + + // ServiceAccountName is the name of the ServiceAccount to use to run this carp. + // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + // +optional + optional string serviceAccountName = 8; + + // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + // Deprecated: Use serviceAccountName instead. 
+ // +k8s:conversion-gen=false + // +optional + optional string serviceAccount = 9; + + // NodeName is a request to schedule this carp onto a specific node. If it is non-empty, + // the scheduler simply schedules this carp onto that node, assuming that it fits resource + // requirements. + // +optional + optional string nodeName = 10; + + // Host networking requested for this carp. Use the host's network namespace. + // If this option is set, the ports that will be used must be specified. + // Default to false. + // +k8s:conversion-gen=false + // +optional + optional bool hostNetwork = 11; + + // Use the host's pid namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + optional bool hostPID = 12; + + // Use the host's ipc namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + optional bool hostIPC = 13; + + // Specifies the hostname of the Carp + // If not specified, the carp's hostname will be set to a system-defined value. + // +optional + optional string hostname = 16; + + // If specified, the fully qualified Carp hostname will be "...svc.". + // If not specified, the carp will not have a domainname at all. + // +optional + optional string subdomain = 17; + + // If specified, the carp will be dispatched by specified scheduler. + // If not specified, the carp will be dispatched by default scheduler. + // +optional + optional string schedulername = 19; +} + +// CarpStatus represents information about the status of a carp. Status may trail the actual +// state of a system. +message CarpStatus { + // Current condition of the carp. + // More info: http://kubernetes.io/docs/user-guide/carp-states#carp-phase + // +optional + optional string phase = 1; + + // Current service state of carp. 
+ // More info: http://kubernetes.io/docs/user-guide/carp-states#carp-conditions + // +optional + repeated CarpCondition conditions = 2; + + // A human readable message indicating details about why the carp is in this condition. + // +optional + optional string message = 3; + + // A brief CamelCase message indicating details about why the carp is in this state. + // e.g. 'OutOfDisk' + // +optional + optional string reason = 4; + + // IP address of the host to which the carp is assigned. Empty if not yet scheduled. + // +optional + optional string hostIP = 5; + + // IP address allocated to the carp. Routable at least within the cluster. + // Empty if not yet allocated. + // +optional + optional string carpIP = 6; + + // RFC 3339 date and time at which the object was acknowledged by the Kubelet. + // This is before the Kubelet pulled the container image(s) for the carp. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7; +} + diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/register.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/register.go new file mode 100644 index 00000000000..d04bc1b58ab --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/register.go @@ -0,0 +1,63 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "testapigroup.apimachinery.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes, addConversionFuncs, addDefaultingFuncs) +} + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Carp{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types.generated.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types.generated.go new file mode 100644 index 00000000000..3b58d2000ab --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types.generated.go @@ -0,0 +1,3132 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package v1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x ConditionStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ConditionStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x CarpConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *CarpConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x CarpPhase) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *CarpPhase) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } 
else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x RestartPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *RestartPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *Carp) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Carp) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Carp) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + 
x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = CarpSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = CarpStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Carp) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = CarpSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = CarpStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CarpStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Phase != "" + yyq2[1] = len(x.Conditions) != 0 + yyq2[2] = x.Message != "" + yyq2[3] = x.Reason != "" + yyq2[4] = x.HostIP != "" + yyq2[5] = x.CarpIP != "" + yyq2[6] = 
x.StartTime != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Phase.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("phase")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Phase.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceCarpCondition(([]CarpCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceCarpCondition(([]CarpCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.CarpIP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("carpIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.CarpIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.StartTime == nil { + r.EncodeNil() + } else { + yym22 := 
z.EncBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.EncExt(x.StartTime) { + } else if yym22 { + z.EncBinaryMarshal(x.StartTime) + } else if !yym22 && z.IsJSONHandle() { + z.EncJSONMarshal(x.StartTime) + } else { + z.EncFallback(x.StartTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StartTime == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.EncExt(x.StartTime) { + } else if yym23 { + z.EncBinaryMarshal(x.StartTime) + } else if !yym23 && z.IsJSONHandle() { + z.EncJSONMarshal(x.StartTime) + } else { + z.EncFallback(x.StartTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CarpStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CarpStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // 
default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "phase": + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv4 := &x.Phase + yyv4.CodecDecodeSelf(d) + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv5 := &x.Conditions + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceCarpCondition((*[]CarpCondition)(yyv5), d) + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv7 := &x.Message + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv9 := &x.Reason + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + case "hostIP": + if r.TryDecodeAsNil() { + x.HostIP = "" + } else { + yyv11 := &x.HostIP + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + case "carpIP": + if r.TryDecodeAsNil() { + x.CarpIP = "" + } else { + yyv13 := &x.CarpIP + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + case "startTime": + if r.TryDecodeAsNil() { + if x.StartTime != nil { + x.StartTime = nil + } + } else { + if x.StartTime == nil { + x.StartTime = new(pkg1_v1.Time) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(x.StartTime) { + } else if yym16 { + z.DecBinaryUnmarshal(x.StartTime) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.StartTime) + } else { + z.DecFallback(x.StartTime, false) + } 
+ } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CarpStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj17 int + var yyb17 bool + var yyhl17 bool = l >= 0 + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv18 := &x.Phase + yyv18.CodecDecodeSelf(d) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv19 := &x.Conditions + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceCarpCondition((*[]CarpCondition)(yyv19), d) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv21 := &x.Message + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv23 := &x.Reason + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + 
yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostIP = "" + } else { + yyv25 := &x.HostIP + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CarpIP = "" + } else { + yyv27 := &x.CarpIP + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StartTime != nil { + x.StartTime = nil + } + } else { + if x.StartTime == nil { + x.StartTime = new(pkg1_v1.Time) + } + yym30 := z.DecBinary() + _ = yym30 + if false { + } else if z.HasExtensions() && z.DecExt(x.StartTime) { + } else if yym30 { + z.DecBinaryUnmarshal(x.StartTime) + } else if !yym30 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.StartTime) + } else { + z.DecFallback(x.StartTime, false) + } + } + for { + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj17-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CarpCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := 
z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Status.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Status.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastProbeTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastProbeTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if 
!yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CarpCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CarpCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if 
r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastProbeTime": + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg1_v1.Time{} + } else { + yyv6 := &x.LastProbeTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv10 := &x.Reason + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CarpCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) + } 
+ yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv16 := &x.Status + yyv16.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg1_v1.Time{} + } else { + yyv17 := &x.LastProbeTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + 
yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CarpSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [13]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.RestartPolicy != "" + yyq2[1] = x.TerminationGracePeriodSeconds != nil + yyq2[2] = x.ActiveDeadlineSeconds != nil + yyq2[3] = len(x.NodeSelector) != 0 + yyq2[4] = x.ServiceAccountName != "" + yyq2[5] = x.DeprecatedServiceAccount != "" + yyq2[6] = x.NodeName != "" + yyq2[7] = x.HostNetwork != false + yyq2[8] = x.HostPID != false + yyq2[9] = x.HostIPC != false + yyq2[10] = x.Hostname != "" + yyq2[11] = x.Subdomain != "" + yyq2[12] = x.SchedulerName != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(13) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.RestartPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if 
yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("restartPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.RestartPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.TerminationGracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy7 := *x.TerminationGracePeriodSeconds + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("terminationGracePeriodSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TerminationGracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy9 := *x.TerminationGracePeriodSeconds + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ActiveDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy12 := *x.ActiveDeadlineSeconds + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(yy12)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ActiveDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy14 := *x.ActiveDeadlineSeconds + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeInt(int64(yy14)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.NodeSelector == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + 
z.F.EncMapStringStringV(x.NodeSelector, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NodeSelector == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + z.F.EncMapStringStringV(x.NodeSelector, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serviceAccountName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DeprecatedServiceAccount)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serviceAccount")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DeprecatedServiceAccount)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(x.NodeName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym27 := z.EncBinary() + _ = yym27 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.NodeName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeBool(bool(x.HostNetwork)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + r.EncodeBool(bool(x.HostNetwork)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeBool(bool(x.HostPID)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + r.EncodeBool(bool(x.HostPID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + yym35 := z.EncBinary() + _ = yym35 + if false { + } else { + r.EncodeBool(bool(x.HostIPC)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym36 := z.EncBinary() + _ = yym36 + 
if false { + } else { + r.EncodeBool(bool(x.HostIPC)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + yym38 := z.EncBinary() + _ = yym38 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostname")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym39 := z.EncBinary() + _ = yym39 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + yym41 := z.EncBinary() + _ = yym41 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subdomain")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym42 := z.EncBinary() + _ = yym42 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + yym44 := z.EncBinary() + _ = yym44 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("schedulername")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym45 := z.EncBinary() + _ = yym45 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CarpSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CarpSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "restartPolicy": + if r.TryDecodeAsNil() { + x.RestartPolicy = "" + } else { + yyv4 := &x.RestartPolicy + yyv4.CodecDecodeSelf(d) + } + case "terminationGracePeriodSeconds": + if r.TryDecodeAsNil() { + if x.TerminationGracePeriodSeconds != nil { + x.TerminationGracePeriodSeconds = nil + } + } else { + if x.TerminationGracePeriodSeconds == nil { + x.TerminationGracePeriodSeconds = new(int64) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + 
*((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + case "activeDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.ActiveDeadlineSeconds != nil { + x.ActiveDeadlineSeconds = nil + } + } else { + if x.ActiveDeadlineSeconds == nil { + x.ActiveDeadlineSeconds = new(int64) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + case "nodeSelector": + if r.TryDecodeAsNil() { + x.NodeSelector = nil + } else { + yyv9 := &x.NodeSelector + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + z.F.DecMapStringStringX(yyv9, false, d) + } + } + case "serviceAccountName": + if r.TryDecodeAsNil() { + x.ServiceAccountName = "" + } else { + yyv11 := &x.ServiceAccountName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + case "serviceAccount": + if r.TryDecodeAsNil() { + x.DeprecatedServiceAccount = "" + } else { + yyv13 := &x.DeprecatedServiceAccount + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + case "nodeName": + if r.TryDecodeAsNil() { + x.NodeName = "" + } else { + yyv15 := &x.NodeName + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + case "hostNetwork": + if r.TryDecodeAsNil() { + x.HostNetwork = false + } else { + yyv17 := &x.HostNetwork + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(yyv17)) = r.DecodeBool() + } + } + case "hostPID": + if r.TryDecodeAsNil() { + x.HostPID = false + } else { + yyv19 := &x.HostPID + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + case "hostIPC": + if r.TryDecodeAsNil() { + x.HostIPC = false + } else { + yyv21 := &x.HostIPC + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(yyv21)) = r.DecodeBool() + } + } + case "hostname": + if 
r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv23 := &x.Hostname + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + case "subdomain": + if r.TryDecodeAsNil() { + x.Subdomain = "" + } else { + yyv25 := &x.Subdomain + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + case "schedulername": + if r.TryDecodeAsNil() { + x.SchedulerName = "" + } else { + yyv27 := &x.SchedulerName + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CarpSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj29 int + var yyb29 bool + var yyhl29 bool = l >= 0 + yyj29++ + if yyhl29 { + yyb29 = yyj29 > l + } else { + yyb29 = r.CheckBreak() + } + if yyb29 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RestartPolicy = "" + } else { + yyv30 := &x.RestartPolicy + yyv30.CodecDecodeSelf(d) + } + yyj29++ + if yyhl29 { + yyb29 = yyj29 > l + } else { + yyb29 = r.CheckBreak() + } + if yyb29 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TerminationGracePeriodSeconds != nil { + x.TerminationGracePeriodSeconds = nil + } + } else { + if x.TerminationGracePeriodSeconds == nil { + x.TerminationGracePeriodSeconds = new(int64) + } + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj29++ + if yyhl29 { + yyb29 = yyj29 > l + } else { + 
yyb29 = r.CheckBreak() + } + if yyb29 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ActiveDeadlineSeconds != nil { + x.ActiveDeadlineSeconds = nil + } + } else { + if x.ActiveDeadlineSeconds == nil { + x.ActiveDeadlineSeconds = new(int64) + } + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj29++ + if yyhl29 { + yyb29 = yyj29 > l + } else { + yyb29 = r.CheckBreak() + } + if yyb29 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodeSelector = nil + } else { + yyv35 := &x.NodeSelector + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + z.F.DecMapStringStringX(yyv35, false, d) + } + } + yyj29++ + if yyhl29 { + yyb29 = yyj29 > l + } else { + yyb29 = r.CheckBreak() + } + if yyb29 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServiceAccountName = "" + } else { + yyv37 := &x.ServiceAccountName + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + yyj29++ + if yyhl29 { + yyb29 = yyj29 > l + } else { + yyb29 = r.CheckBreak() + } + if yyb29 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DeprecatedServiceAccount = "" + } else { + yyv39 := &x.DeprecatedServiceAccount + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } + } + yyj29++ + if yyhl29 { + yyb29 = yyj29 > l + } else { + yyb29 = r.CheckBreak() + } + if yyb29 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodeName = "" + } else { + yyv41 := &x.NodeName + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*string)(yyv41)) = r.DecodeString() + } + } + yyj29++ + if yyhl29 { + yyb29 = yyj29 > l + } else { + yyb29 = r.CheckBreak() + } + if yyb29 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostNetwork = false + } else { + yyv43 := &x.HostNetwork + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*bool)(yyv43)) = r.DecodeBool() + } + } + yyj29++ + if yyhl29 { + yyb29 = yyj29 > l + } else { + yyb29 = r.CheckBreak() + } + if yyb29 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostPID = false + } else { + yyv45 := &x.HostPID + yym46 := z.DecBinary() + _ = yym46 + if false { + } else { + *((*bool)(yyv45)) = r.DecodeBool() + } + } + yyj29++ + if yyhl29 { + yyb29 = yyj29 > l + } else { + yyb29 = r.CheckBreak() + } + if yyb29 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostIPC = false + } else { + yyv47 := &x.HostIPC + yym48 := z.DecBinary() + _ = yym48 + if false { + } else { + *((*bool)(yyv47)) = r.DecodeBool() + } + } + yyj29++ + if yyhl29 { + yyb29 = yyj29 > l + } else { + yyb29 = r.CheckBreak() + } + if yyb29 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv49 := &x.Hostname + yym50 := z.DecBinary() + _ = yym50 + if false { + } else { + *((*string)(yyv49)) = r.DecodeString() + } + } + yyj29++ + if yyhl29 { + yyb29 = yyj29 > l + } else { + yyb29 = 
r.CheckBreak() + } + if yyb29 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subdomain = "" + } else { + yyv51 := &x.Subdomain + yym52 := z.DecBinary() + _ = yym52 + if false { + } else { + *((*string)(yyv51)) = r.DecodeString() + } + } + yyj29++ + if yyhl29 { + yyb29 = yyj29 > l + } else { + yyb29 = r.CheckBreak() + } + if yyb29 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SchedulerName = "" + } else { + yyv53 := &x.SchedulerName + yym54 := z.DecBinary() + _ = yym54 + if false { + } else { + *((*string)(yyv53)) = r.DecodeString() + } + } + for { + yyj29++ + if yyhl29 { + yyb29 = yyj29 > l + } else { + yyb29 = r.CheckBreak() + } + if yyb29 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj29-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CarpList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + 
} else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceCarp(([]Carp)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceCarp(([]Carp)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CarpList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CarpList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } 
+ } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceCarp((*[]Carp)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CarpList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceCarp((*[]Carp)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceCarpCondition(v []CarpCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCarpCondition(v *[]CarpCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CarpCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + 
if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]CarpCondition, yyrl1) + } + } else { + yyv1 = make([]CarpCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CarpCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CarpCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CarpCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CarpCondition{}) // var yyz1 CarpCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = CarpCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CarpCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceCarp(v []Carp, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCarp(v *[]Carp, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, 
_, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Carp{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 520) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Carp, yyrl1) + } + } else { + yyv1 = make([]Carp, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Carp{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Carp{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Carp{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Carp{}) // var yyz1 Carp + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Carp{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Carp{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types.go new file mode 100644 index 00000000000..995a28a7ed0 --- /dev/null +++ 
b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types.go @@ -0,0 +1,192 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type ( + ConditionStatus string + CarpConditionType string + CarpPhase string + RestartPolicy string +) + +// Carp is a collection of containers, used as either input (create, update) or as output (list, get). +type Carp struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the carp. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec CarpSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the carp. + // This data may not be up to date. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status CarpStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// CarpStatus represents information about the status of a carp. Status may trail the actual +// state of a system. +type CarpStatus struct { + // Current condition of the carp. 
+ // More info: http://kubernetes.io/docs/user-guide/carp-states#carp-phase + // +optional + Phase CarpPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=CarpPhase"` + // Current service state of carp. + // More info: http://kubernetes.io/docs/user-guide/carp-states#carp-conditions + // +optional + Conditions []CarpCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` + // A human readable message indicating details about why the carp is in this condition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // A brief CamelCase message indicating details about why the carp is in this state. + // e.g. 'OutOfDisk' + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + + // IP address of the host to which the carp is assigned. Empty if not yet scheduled. + // +optional + HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` + // IP address allocated to the carp. Routable at least within the cluster. + // Empty if not yet allocated. + // +optional + CarpIP string `json:"carpIP,omitempty" protobuf:"bytes,6,opt,name=carpIP"` + + // RFC 3339 date and time at which the object was acknowledged by the Kubelet. + // This is before the Kubelet pulled the container image(s) for the carp. + // +optional + StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"` +} + +type CarpCondition struct { + // Type is the type of the condition. + // Currently only Ready. + // More info: http://kubernetes.io/docs/user-guide/carp-states#carp-conditions + Type CarpConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=CarpConditionType"` + // Status is the status of the condition. + // Can be True, False, Unknown. 
+ // More info: http://kubernetes.io/docs/user-guide/carp-states#carp-conditions + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // Unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +// CarpSpec is a description of a carp +type CarpSpec struct { + // Restart policy for all containers within the carp. + // One of Always, OnFailure, Never. + // Default to Always. + // More info: http://kubernetes.io/docs/user-guide/carp-states#restartpolicy + // +optional + RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"` + // Optional duration in seconds the carp needs to terminate gracefully. May be decreased in delete request. + // Value must be non-negative integer. The value zero indicates delete immediately. + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the carp are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // Defaults to 30 seconds. 
+ // +optional + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"` + // Optional duration in seconds the carp may be active on the node relative to + // StartTime before the system will actively try to mark it failed and kill associated containers. + // Value must be a positive integer. + // +optional + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"` + // NodeSelector is a selector which must be true for the carp to fit on a node. + // Selector which must match a node's labels for the carp to be scheduled on that node. + // More info: http://kubernetes.io/docs/user-guide/node-selection/README + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` + + // ServiceAccountName is the name of the ServiceAccount to use to run this carp. + // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` + // DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + // Deprecated: Use serviceAccountName instead. + // +k8s:conversion-gen=false + // +optional + DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"` + + // NodeName is a request to schedule this carp onto a specific node. If it is non-empty, + // the scheduler simply schedules this carp onto that node, assuming that it fits resource + // requirements. + // +optional + NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"` + // Host networking requested for this carp. Use the host's network namespace. + // If this option is set, the ports that will be used must be specified. + // Default to false. 
+ // +k8s:conversion-gen=false + // +optional + HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"` + // Use the host's pid namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"` + // Use the host's ipc namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"` + // Specifies the hostname of the Carp + // If not specified, the carp's hostname will be set to a system-defined value. + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"` + // If specified, the fully qualified Carp hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + // If not specified, the carp will not have a domainname at all. + // +optional + Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"` + // If specified, the carp will be dispatched by specified scheduler. + // If not specified, the carp will be dispatched by default scheduler. + // +optional + SchedulerName string `json:"schedulername,omitempty" protobuf:"bytes,19,opt,name=schedulername"` +} + +// CarpList is a list of Carps. +type CarpList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of carps. 
+ // More info: http://kubernetes.io/docs/user-guide/carps + Items []Carp `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/mesos/mesos-go/NOTICE b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types_swagger_doc_generated.go similarity index 90% rename from vendor/github.com/mesos/mesos-go/NOTICE rename to staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types_swagger_doc_generated.go index 491bbe14560..c7be42d5a19 100644 --- a/vendor/github.com/mesos/mesos-go/NOTICE +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types_swagger_doc_generated.go @@ -1,4 +1,5 @@ -Copyright 2013-2015, Mesosphere, Inc. +/* +Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -11,3 +12,6 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +*/ + +package v1 diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.conversion.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.conversion.go new file mode 100644 index 00000000000..bd3dbf1d694 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.conversion.go @@ -0,0 +1,226 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + testapigroup "k8s.io/apimachinery/pkg/apis/testapigroup" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + unsafe "unsafe" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_Carp_To_testapigroup_Carp, + Convert_testapigroup_Carp_To_v1_Carp, + Convert_v1_CarpCondition_To_testapigroup_CarpCondition, + Convert_testapigroup_CarpCondition_To_v1_CarpCondition, + Convert_v1_CarpList_To_testapigroup_CarpList, + Convert_testapigroup_CarpList_To_v1_CarpList, + Convert_v1_CarpSpec_To_testapigroup_CarpSpec, + Convert_testapigroup_CarpSpec_To_v1_CarpSpec, + Convert_v1_CarpStatus_To_testapigroup_CarpStatus, + Convert_testapigroup_CarpStatus_To_v1_CarpStatus, + ) +} + +func autoConvert_v1_Carp_To_testapigroup_Carp(in *Carp, out *testapigroup.Carp, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_CarpSpec_To_testapigroup_CarpSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_CarpStatus_To_testapigroup_CarpStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Carp_To_testapigroup_Carp is an autogenerated conversion function. 
+func Convert_v1_Carp_To_testapigroup_Carp(in *Carp, out *testapigroup.Carp, s conversion.Scope) error { + return autoConvert_v1_Carp_To_testapigroup_Carp(in, out, s) +} + +func autoConvert_testapigroup_Carp_To_v1_Carp(in *testapigroup.Carp, out *Carp, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_testapigroup_CarpSpec_To_v1_CarpSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_testapigroup_CarpStatus_To_v1_CarpStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_testapigroup_Carp_To_v1_Carp is an autogenerated conversion function. +func Convert_testapigroup_Carp_To_v1_Carp(in *testapigroup.Carp, out *Carp, s conversion.Scope) error { + return autoConvert_testapigroup_Carp_To_v1_Carp(in, out, s) +} + +func autoConvert_v1_CarpCondition_To_testapigroup_CarpCondition(in *CarpCondition, out *testapigroup.CarpCondition, s conversion.Scope) error { + out.Type = testapigroup.CarpConditionType(in.Type) + out.Status = testapigroup.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_CarpCondition_To_testapigroup_CarpCondition is an autogenerated conversion function. 
+func Convert_v1_CarpCondition_To_testapigroup_CarpCondition(in *CarpCondition, out *testapigroup.CarpCondition, s conversion.Scope) error { + return autoConvert_v1_CarpCondition_To_testapigroup_CarpCondition(in, out, s) +} + +func autoConvert_testapigroup_CarpCondition_To_v1_CarpCondition(in *testapigroup.CarpCondition, out *CarpCondition, s conversion.Scope) error { + out.Type = CarpConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_testapigroup_CarpCondition_To_v1_CarpCondition is an autogenerated conversion function. +func Convert_testapigroup_CarpCondition_To_v1_CarpCondition(in *testapigroup.CarpCondition, out *CarpCondition, s conversion.Scope) error { + return autoConvert_testapigroup_CarpCondition_To_v1_CarpCondition(in, out, s) +} + +func autoConvert_v1_CarpList_To_testapigroup_CarpList(in *CarpList, out *testapigroup.CarpList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]testapigroup.Carp, len(*in)) + for i := range *in { + if err := Convert_v1_Carp_To_testapigroup_Carp(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_CarpList_To_testapigroup_CarpList is an autogenerated conversion function. 
+func Convert_v1_CarpList_To_testapigroup_CarpList(in *CarpList, out *testapigroup.CarpList, s conversion.Scope) error { + return autoConvert_v1_CarpList_To_testapigroup_CarpList(in, out, s) +} + +func autoConvert_testapigroup_CarpList_To_v1_CarpList(in *testapigroup.CarpList, out *CarpList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Carp, len(*in)) + for i := range *in { + if err := Convert_testapigroup_Carp_To_v1_Carp(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]Carp, 0) + } + return nil +} + +// Convert_testapigroup_CarpList_To_v1_CarpList is an autogenerated conversion function. +func Convert_testapigroup_CarpList_To_v1_CarpList(in *testapigroup.CarpList, out *CarpList, s conversion.Scope) error { + return autoConvert_testapigroup_CarpList_To_v1_CarpList(in, out, s) +} + +func autoConvert_v1_CarpSpec_To_testapigroup_CarpSpec(in *CarpSpec, out *testapigroup.CarpSpec, s conversion.Scope) error { + out.RestartPolicy = testapigroup.RestartPolicy(in.RestartPolicy) + out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) + out.ServiceAccountName = in.ServiceAccountName + // INFO: in.DeprecatedServiceAccount opted out of conversion generation + out.NodeName = in.NodeName + // INFO: in.HostNetwork opted out of conversion generation + // INFO: in.HostPID opted out of conversion generation + // INFO: in.HostIPC opted out of conversion generation + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain + out.SchedulerName = in.SchedulerName + return nil +} + +// Convert_v1_CarpSpec_To_testapigroup_CarpSpec is an autogenerated conversion function. 
+func Convert_v1_CarpSpec_To_testapigroup_CarpSpec(in *CarpSpec, out *testapigroup.CarpSpec, s conversion.Scope) error { + return autoConvert_v1_CarpSpec_To_testapigroup_CarpSpec(in, out, s) +} + +func autoConvert_testapigroup_CarpSpec_To_v1_CarpSpec(in *testapigroup.CarpSpec, out *CarpSpec, s conversion.Scope) error { + out.RestartPolicy = RestartPolicy(in.RestartPolicy) + out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) + out.ServiceAccountName = in.ServiceAccountName + out.NodeName = in.NodeName + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain + out.SchedulerName = in.SchedulerName + return nil +} + +// Convert_testapigroup_CarpSpec_To_v1_CarpSpec is an autogenerated conversion function. +func Convert_testapigroup_CarpSpec_To_v1_CarpSpec(in *testapigroup.CarpSpec, out *CarpSpec, s conversion.Scope) error { + return autoConvert_testapigroup_CarpSpec_To_v1_CarpSpec(in, out, s) +} + +func autoConvert_v1_CarpStatus_To_testapigroup_CarpStatus(in *CarpStatus, out *testapigroup.CarpStatus, s conversion.Scope) error { + out.Phase = testapigroup.CarpPhase(in.Phase) + out.Conditions = *(*[]testapigroup.CarpCondition)(unsafe.Pointer(&in.Conditions)) + out.Message = in.Message + out.Reason = in.Reason + out.HostIP = in.HostIP + out.CarpIP = in.CarpIP + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + return nil +} + +// Convert_v1_CarpStatus_To_testapigroup_CarpStatus is an autogenerated conversion function. 
+func Convert_v1_CarpStatus_To_testapigroup_CarpStatus(in *CarpStatus, out *testapigroup.CarpStatus, s conversion.Scope) error { + return autoConvert_v1_CarpStatus_To_testapigroup_CarpStatus(in, out, s) +} + +func autoConvert_testapigroup_CarpStatus_To_v1_CarpStatus(in *testapigroup.CarpStatus, out *CarpStatus, s conversion.Scope) error { + out.Phase = CarpPhase(in.Phase) + out.Conditions = *(*[]CarpCondition)(unsafe.Pointer(&in.Conditions)) + out.Message = in.Message + out.Reason = in.Reason + out.HostIP = in.HostIP + out.CarpIP = in.CarpIP + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + return nil +} + +// Convert_testapigroup_CarpStatus_To_v1_CarpStatus is an autogenerated conversion function. +func Convert_testapigroup_CarpStatus_To_v1_CarpStatus(in *testapigroup.CarpStatus, out *CarpStatus, s conversion.Scope) error { + return autoConvert_testapigroup_CarpStatus_To_v1_CarpStatus(in, out, s) +} diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..9fdd8c6a75e --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.deepcopy.go @@ -0,0 +1,155 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Carp, InType: reflect.TypeOf(&Carp{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CarpCondition, InType: reflect.TypeOf(&CarpCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CarpList, InType: reflect.TypeOf(&CarpList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CarpSpec, InType: reflect.TypeOf(&CarpSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CarpStatus, InType: reflect.TypeOf(&CarpStatus{})}, + ) +} + +// DeepCopy_v1_Carp is an autogenerated deepcopy function. +func DeepCopy_v1_Carp(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Carp) + out := out.(*Carp) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if newVal, err := c.DeepCopy(&in.Spec); err != nil { + return err + } else { + out.Spec = *newVal.(*CarpSpec) + } + if newVal, err := c.DeepCopy(&in.Status); err != nil { + return err + } else { + out.Status = *newVal.(*CarpStatus) + } + return nil + } +} + +// DeepCopy_v1_CarpCondition is an autogenerated deepcopy function. 
+func DeepCopy_v1_CarpCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CarpCondition) + out := out.(*CarpCondition) + *out = *in + out.LastProbeTime = in.LastProbeTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +// DeepCopy_v1_CarpList is an autogenerated deepcopy function. +func DeepCopy_v1_CarpList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CarpList) + out := out.(*CarpList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Carp, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*Carp) + } + } + } + return nil + } +} + +// DeepCopy_v1_CarpSpec is an autogenerated deepcopy function. +func DeepCopy_v1_CarpSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CarpSpec) + out := out.(*CarpSpec) + *out = *in + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +// DeepCopy_v1_CarpStatus is an autogenerated deepcopy function. 
+func DeepCopy_v1_CarpStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CarpStatus) + out := out.(*CarpStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]CarpCondition, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*CarpCondition) + } + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(meta_v1.Time) + **out = (*in).DeepCopy() + } + return nil + } +} diff --git a/pkg/util/intstr/doc.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.defaults.go similarity index 56% rename from pkg/util/intstr/doc.go rename to staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.defaults.go index e5221d4697a..6df448eb9fd 100644 --- a/pkg/util/intstr/doc.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.defaults.go @@ -1,3 +1,5 @@ +// +build !ignore_autogenerated + /* Copyright 2017 The Kubernetes Authors. @@ -14,8 +16,17 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package intstr only exists until heapster rebases -// TODO genericapiserver remove this empty package. Godep fails without this because heapster relies -// on this package. This will allow us to start splitting packages, but will force -// heapster to update on their next kube rebase. -package intstr +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/zz_generated.deepcopy.go new file mode 100644 index 00000000000..0b177baef0c --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/zz_generated.deepcopy.go @@ -0,0 +1,155 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package testapigroup + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_testapigroup_Carp, InType: reflect.TypeOf(&Carp{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_testapigroup_CarpCondition, InType: reflect.TypeOf(&CarpCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_testapigroup_CarpList, InType: reflect.TypeOf(&CarpList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_testapigroup_CarpSpec, InType: reflect.TypeOf(&CarpSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_testapigroup_CarpStatus, InType: reflect.TypeOf(&CarpStatus{})}, + ) +} + +// DeepCopy_testapigroup_Carp is an autogenerated deepcopy function. +func DeepCopy_testapigroup_Carp(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Carp) + out := out.(*Carp) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if newVal, err := c.DeepCopy(&in.Spec); err != nil { + return err + } else { + out.Spec = *newVal.(*CarpSpec) + } + if newVal, err := c.DeepCopy(&in.Status); err != nil { + return err + } else { + out.Status = *newVal.(*CarpStatus) + } + return nil + } +} + +// DeepCopy_testapigroup_CarpCondition is an autogenerated deepcopy function. +func DeepCopy_testapigroup_CarpCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CarpCondition) + out := out.(*CarpCondition) + *out = *in + out.LastProbeTime = in.LastProbeTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +// DeepCopy_testapigroup_CarpList is an autogenerated deepcopy function. 
+func DeepCopy_testapigroup_CarpList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CarpList) + out := out.(*CarpList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Carp, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*Carp) + } + } + } + return nil + } +} + +// DeepCopy_testapigroup_CarpSpec is an autogenerated deepcopy function. +func DeepCopy_testapigroup_CarpSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CarpSpec) + out := out.(*CarpSpec) + *out = *in + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +// DeepCopy_testapigroup_CarpStatus is an autogenerated deepcopy function. 
+func DeepCopy_testapigroup_CarpStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CarpStatus) + out := out.(*CarpStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]CarpCondition, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*CarpCondition) + } + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + return nil + } +} diff --git a/staging/src/k8s.io/apimachinery/pkg/conversion/unstructured/converter.go b/staging/src/k8s.io/apimachinery/pkg/conversion/unstructured/converter.go index cf84a619898..8dab1dbfe93 100644 --- a/staging/src/k8s.io/apimachinery/pkg/conversion/unstructured/converter.go +++ b/staging/src/k8s.io/apimachinery/pkg/conversion/unstructured/converter.go @@ -39,7 +39,7 @@ import ( // Converter is an interface for converting between interface{} // and map[string]interface representation. 
type Converter interface { - ToUnstructured(obj interface{}, u *map[string]interface{}) error + ToUnstructured(obj interface{}) (map[string]interface{}, error) FromUnstructured(u map[string]interface{}, obj interface{}) error } @@ -388,12 +388,13 @@ func interfaceFromUnstructured(sv, dv reflect.Value) error { return nil } -func (c *converterImpl) ToUnstructured(obj interface{}, u *map[string]interface{}) error { +func (c *converterImpl) ToUnstructured(obj interface{}) (map[string]interface{}, error) { t := reflect.TypeOf(obj) value := reflect.ValueOf(obj) if t.Kind() != reflect.Ptr || value.IsNil() { - return fmt.Errorf("ToUnstructured requires a non-nil pointer to an object, got %v", t) + return nil, fmt.Errorf("ToUnstructured requires a non-nil pointer to an object, got %v", t) } + u := &map[string]interface{}{} err := toUnstructured(value.Elem(), reflect.ValueOf(u).Elem()) if c.mismatchDetection { newUnstr := &map[string]interface{}{} @@ -405,7 +406,10 @@ func (c *converterImpl) ToUnstructured(obj interface{}, u *map[string]interface{ glog.Fatalf("ToUnstructured mismatch for %#v, diff: %v", u, diff.ObjectReflectDiff(u, newUnstr)) } } - return err + if err != nil { + return nil, err + } + return *u, nil } func toUnstructuredViaJSON(obj interface{}, u *map[string]interface{}) error { diff --git a/staging/src/k8s.io/apimachinery/pkg/conversion/unstructured/converter_test.go b/staging/src/k8s.io/apimachinery/pkg/conversion/unstructured/converter_test.go index cbab0d1dad7..68ca9e203f5 100644 --- a/staging/src/k8s.io/apimachinery/pkg/conversion/unstructured/converter_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/conversion/unstructured/converter_test.go @@ -121,8 +121,7 @@ func doRoundTrip(t *testing.T, item runtime.Object) { return } - newUnstr := make(map[string]interface{}) - err = DefaultConverter.ToUnstructured(item, &newUnstr) + newUnstr, err := DefaultConverter.ToUnstructured(item) if err != nil { t.Errorf("ToUnstructured failed: %v", err) return diff --git 
a/staging/src/k8s.io/apimachinery/pkg/runtime/BUILD b/staging/src/k8s.io/apimachinery/pkg/runtime/BUILD index 4fd7d5a7eeb..4a62f1aa7a6 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/BUILD @@ -63,6 +63,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/testing:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/conversion_test.go b/staging/src/k8s.io/apimachinery/pkg/runtime/conversion_test.go index 692a5bfbb39..33670415b44 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/conversion_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/conversion_test.go @@ -22,37 +22,17 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + runtimetesting "k8s.io/apimachinery/pkg/runtime/testing" ) -type InternalComplex struct { - runtime.TypeMeta - String string - Integer int - Integer64 int64 - Int64 int64 - Bool bool -} - -type ExternalComplex struct { - runtime.TypeMeta `json:",inline"` - String string `json:"string" description:"testing"` - Integer int `json:"int"` - Integer64 int64 `json:",omitempty"` - Int64 int64 - Bool bool `json:"bool"` -} - -func (obj *InternalComplex) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *ExternalComplex) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } - func TestStringMapConversion(t *testing.T) { internalGV := schema.GroupVersion{Group: "test.group", Version: runtime.APIVersionInternal} externalGV := schema.GroupVersion{Group: "test.group", Version: "external"} scheme := runtime.NewScheme() scheme.Log(t) - scheme.AddKnownTypeWithName(internalGV.WithKind("Complex"), &InternalComplex{}) - 
scheme.AddKnownTypeWithName(externalGV.WithKind("Complex"), &ExternalComplex{}) + scheme.AddKnownTypeWithName(internalGV.WithKind("Complex"), &runtimetesting.InternalComplex{}) + scheme.AddKnownTypeWithName(externalGV.WithKind("Complex"), &runtimetesting.ExternalComplex{}) testCases := map[string]struct { input map[string][]string @@ -66,62 +46,62 @@ func TestStringMapConversion(t *testing.T) { "int": {"1"}, "Integer64": {"2"}, }, - expected: &ExternalComplex{String: "value", Integer: 1}, + expected: &runtimetesting.ExternalComplex{String: "value", Integer: 1}, }, "returns error on bad int": { input: map[string][]string{ "int": {"a"}, }, errFn: func(err error) bool { return err != nil }, - expected: &ExternalComplex{}, + expected: &runtimetesting.ExternalComplex{}, }, "parses int64": { input: map[string][]string{ "Int64": {"-1"}, }, - expected: &ExternalComplex{Int64: -1}, + expected: &runtimetesting.ExternalComplex{Int64: -1}, }, "returns error on bad int64": { input: map[string][]string{ "Int64": {"a"}, }, errFn: func(err error) bool { return err != nil }, - expected: &ExternalComplex{}, + expected: &runtimetesting.ExternalComplex{}, }, "parses boolean true": { input: map[string][]string{ "bool": {"true"}, }, - expected: &ExternalComplex{Bool: true}, + expected: &runtimetesting.ExternalComplex{Bool: true}, }, "parses boolean any value": { input: map[string][]string{ "bool": {"foo"}, }, - expected: &ExternalComplex{Bool: true}, + expected: &runtimetesting.ExternalComplex{Bool: true}, }, "parses boolean false": { input: map[string][]string{ "bool": {"false"}, }, - expected: &ExternalComplex{Bool: false}, + expected: &runtimetesting.ExternalComplex{Bool: false}, }, "parses boolean empty value": { input: map[string][]string{ "bool": {""}, }, - expected: &ExternalComplex{Bool: true}, + expected: &runtimetesting.ExternalComplex{Bool: true}, }, "parses boolean no value": { input: map[string][]string{ "bool": {}, }, - expected: &ExternalComplex{Bool: false}, + expected: 
&runtimetesting.ExternalComplex{Bool: false}, }, } for k, tc := range testCases { - out := &ExternalComplex{} + out := &runtimetesting.ExternalComplex{} if err := scheme.Convert(&tc.input, out, nil); (tc.errFn == nil && err != nil) || (tc.errFn != nil && !tc.errFn(err)) { t.Errorf("%s: unexpected error: %v", k, err) continue diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/embedded_test.go b/staging/src/k8s.io/apimachinery/pkg/runtime/embedded_test.go index a70b7f9b6d3..37a145253af 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/embedded_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/embedded_test.go @@ -25,50 +25,18 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" + runtimetesting "k8s.io/apimachinery/pkg/runtime/testing" "k8s.io/apimachinery/pkg/util/diff" ) -type EmbeddedTest struct { - runtime.TypeMeta - ID string - Object runtime.Object - EmptyObject runtime.Object -} - -type EmbeddedTestExternal struct { - runtime.TypeMeta `json:",inline"` - ID string `json:"id,omitempty"` - Object runtime.RawExtension `json:"object,omitempty"` - EmptyObject runtime.RawExtension `json:"emptyObject,omitempty"` -} - -type ObjectTest struct { - runtime.TypeMeta - - ID string - Items []runtime.Object -} - -type ObjectTestExternal struct { - runtime.TypeMeta `yaml:",inline" json:",inline"` - - ID string `json:"id,omitempty"` - Items []runtime.RawExtension `json:"items,omitempty"` -} - -func (obj *ObjectTest) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *ObjectTestExternal) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *EmbeddedTest) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *EmbeddedTestExternal) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } - func TestDecodeEmptyRawExtensionAsObject(t *testing.T) { internalGV := schema.GroupVersion{Group: "test.group", Version: 
runtime.APIVersionInternal} externalGV := schema.GroupVersion{Group: "test.group", Version: "v1test"} externalGVK := externalGV.WithKind("ObjectTest") s := runtime.NewScheme() - s.AddKnownTypes(internalGV, &ObjectTest{}) - s.AddKnownTypeWithName(externalGVK, &ObjectTestExternal{}) + s.AddKnownTypes(internalGV, &runtimetesting.ObjectTest{}) + s.AddKnownTypeWithName(externalGVK, &runtimetesting.ObjectTestExternal{}) codec := serializer.NewCodecFactory(s).LegacyCodec(externalGV) @@ -76,7 +44,7 @@ func TestDecodeEmptyRawExtensionAsObject(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %v", err) } - test := obj.(*ObjectTest) + test := obj.(*runtimetesting.ObjectTest) if unk, ok := test.Items[0].(*runtime.Unknown); !ok || unk.Kind != "" || unk.APIVersion != "" || string(unk.Raw) != "{}" || unk.ContentType != runtime.ContentTypeJSON { t.Fatalf("unexpected object: %#v", test.Items[0]) } @@ -88,7 +56,7 @@ func TestDecodeEmptyRawExtensionAsObject(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %v", err) } - test = obj.(*ObjectTest) + test = obj.(*runtimetesting.ObjectTest) if unk, ok := test.Items[0].(*runtime.Unknown); !ok || unk.Kind != "" || unk.APIVersion != "" || string(unk.Raw) != `{"kind":"Other","apiVersion":"v1"}` || unk.ContentType != runtime.ContentTypeJSON { t.Fatalf("unexpected object: %#v", test.Items[0]) } @@ -102,29 +70,29 @@ func TestArrayOfRuntimeObject(t *testing.T) { externalGV := schema.GroupVersion{Group: "test.group", Version: "v1test"} s := runtime.NewScheme() - s.AddKnownTypes(internalGV, &EmbeddedTest{}) - s.AddKnownTypeWithName(externalGV.WithKind("EmbeddedTest"), &EmbeddedTestExternal{}) - s.AddKnownTypes(internalGV, &ObjectTest{}) - s.AddKnownTypeWithName(externalGV.WithKind("ObjectTest"), &ObjectTestExternal{}) + s.AddKnownTypes(internalGV, &runtimetesting.EmbeddedTest{}) + s.AddKnownTypeWithName(externalGV.WithKind("EmbeddedTest"), &runtimetesting.EmbeddedTestExternal{}) + s.AddKnownTypes(internalGV, 
&runtimetesting.ObjectTest{}) + s.AddKnownTypeWithName(externalGV.WithKind("ObjectTest"), &runtimetesting.ObjectTestExternal{}) codec := serializer.NewCodecFactory(s).LegacyCodec(externalGV) innerItems := []runtime.Object{ - &EmbeddedTest{ID: "baz"}, + &runtimetesting.EmbeddedTest{ID: "baz"}, } items := []runtime.Object{ - &EmbeddedTest{ID: "foo"}, - &EmbeddedTest{ID: "bar"}, + &runtimetesting.EmbeddedTest{ID: "foo"}, + &runtimetesting.EmbeddedTest{ID: "bar"}, // TODO: until YAML is removed, this JSON must be in ascending key order to ensure consistent roundtrip serialization &runtime.Unknown{ Raw: []byte(`{"apiVersion":"unknown.group/unknown","foo":"bar","kind":"OtherTest"}`), ContentType: runtime.ContentTypeJSON, }, - &ObjectTest{ + &runtimetesting.ObjectTest{ Items: runtime.NewEncodableList(codec, innerItems), }, } - internal := &ObjectTest{ + internal := &runtimetesting.ObjectTest{ Items: runtime.NewEncodableList(codec, items), } wire, err := runtime.Encode(codec, internal) @@ -133,13 +101,13 @@ func TestArrayOfRuntimeObject(t *testing.T) { } t.Logf("Wire format is:\n%s\n", string(wire)) - obj := &ObjectTestExternal{} + obj := &runtimetesting.ObjectTestExternal{} if err := json.Unmarshal(wire, obj); err != nil { t.Fatalf("unexpected error: %v", err) } t.Logf("exact wire is: %s", string(obj.Items[0].Raw)) - items[3] = &ObjectTest{Items: innerItems} + items[3] = &runtimetesting.ObjectTest{Items: innerItems} internal.Items = items decoded, err := runtime.Decode(codec, wire) @@ -178,15 +146,15 @@ func TestNestedObject(t *testing.T) { embeddedTestExternalGVK := externalGV.WithKind("EmbeddedTest") s := runtime.NewScheme() - s.AddKnownTypes(internalGV, &EmbeddedTest{}) - s.AddKnownTypeWithName(embeddedTestExternalGVK, &EmbeddedTestExternal{}) + s.AddKnownTypes(internalGV, &runtimetesting.EmbeddedTest{}) + s.AddKnownTypeWithName(embeddedTestExternalGVK, &runtimetesting.EmbeddedTestExternal{}) codec := serializer.NewCodecFactory(s).LegacyCodec(externalGV) - inner := 
&EmbeddedTest{ + inner := &runtimetesting.EmbeddedTest{ ID: "inner", } - outer := &EmbeddedTest{ + outer := &runtimetesting.EmbeddedTest{ ID: "outer", Object: runtime.NewEncodable(codec, inner), } @@ -210,18 +178,18 @@ func TestNestedObject(t *testing.T) { t.Errorf("Expected unequal %#v %#v", e, a) } - obj, err := runtime.Decode(codec, decoded.(*EmbeddedTest).Object.(*runtime.Unknown).Raw) + obj, err := runtime.Decode(codec, decoded.(*runtimetesting.EmbeddedTest).Object.(*runtime.Unknown).Raw) if err != nil { t.Fatal(err) } - decoded.(*EmbeddedTest).Object = obj + decoded.(*runtimetesting.EmbeddedTest).Object = obj if e, a := outer, decoded; !reflect.DeepEqual(e, a) { t.Errorf("Expected equal %#v %#v", e, a) } // test JSON decoding of the external object, which should preserve // raw bytes - var externalViaJSON EmbeddedTestExternal + var externalViaJSON runtimetesting.EmbeddedTestExternal err = json.Unmarshal(wire, &externalViaJSON) if err != nil { t.Fatalf("Unexpected decode error %v", err) @@ -237,7 +205,7 @@ func TestNestedObject(t *testing.T) { // Generic Unmarshalling of JSON cannot load the nested objects because there is // no default schema set. 
Consumers wishing to get direct JSON decoding must use // the external representation - var decodedViaJSON EmbeddedTest + var decodedViaJSON runtimetesting.EmbeddedTest err = json.Unmarshal(wire, &decodedViaJSON) if err == nil { t.Fatal("Expeceted decode error") @@ -257,12 +225,12 @@ func TestDeepCopyOfRuntimeObject(t *testing.T) { embeddedTestExternalGVK := externalGV.WithKind("EmbeddedTest") s := runtime.NewScheme() - s.AddKnownTypes(internalGV, &EmbeddedTest{}) - s.AddKnownTypeWithName(embeddedTestExternalGVK, &EmbeddedTestExternal{}) + s.AddKnownTypes(internalGV, &runtimetesting.EmbeddedTest{}) + s.AddKnownTypeWithName(embeddedTestExternalGVK, &runtimetesting.EmbeddedTestExternal{}) - original := &EmbeddedTest{ + original := &runtimetesting.EmbeddedTest{ ID: "outer", - Object: &EmbeddedTest{ + Object: &runtimetesting.EmbeddedTest{ ID: "inner", }, } diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/interfaces.go b/staging/src/k8s.io/apimachinery/pkg/runtime/interfaces.go index fcb18ba111f..281f8d23c7c 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -242,10 +242,14 @@ type Unstructured interface { // IsUnstructuredObject is a marker interface to allow objects that can be serialized but not introspected // to bypass conversion. IsUnstructuredObject() - // IsList returns true if this type is a list or matches the list convention - has an array called "items". - IsList() bool // UnstructuredContent returns a non-nil, mutable map of the contents of this object. Values may be // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to // and from JSON. UnstructuredContent() map[string]interface{} + // IsList returns true if this type is a list or matches the list convention - has an array called "items". + IsList() bool + // EachListItem should pass a single item out of the list as an Object to the provided function. 
Any + // error should terminate the iteration. If IsList() returns false, this method should return an error + // instead of calling the provided function. + EachListItem(func(Object) error) error } diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/register.go b/staging/src/k8s.io/apimachinery/pkg/runtime/register.go index 2ec6db8201f..eeb380c3dc3 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/register.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/register.go @@ -28,7 +28,7 @@ func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } -func (obj *Unknown) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } // GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind // interface if no objects are provided, or the ObjectKind interface of the object in the diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/scheme_test.go b/staging/src/k8s.io/apimachinery/pkg/runtime/scheme_test.go index 8b56ece2265..9382dc96ac5 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/scheme_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/scheme_test.go @@ -28,31 +28,19 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" + runtimetesting "k8s.io/apimachinery/pkg/runtime/testing" "k8s.io/apimachinery/pkg/util/diff" ) var fuzzIters = flag.Int("fuzz-iters", 50, "How many fuzzing iterations to do.") -type InternalSimple struct { - runtime.TypeMeta `json:",inline"` - TestString string `json:"testString"` -} - -type ExternalSimple struct { - runtime.TypeMeta `json:",inline"` - TestString string `json:"testString"` -} - -func (obj *InternalSimple) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *ExternalSimple) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } - func 
TestScheme(t *testing.T) { internalGV := schema.GroupVersion{Group: "test.group", Version: runtime.APIVersionInternal} externalGV := schema.GroupVersion{Group: "test.group", Version: "testExternal"} scheme := runtime.NewScheme() - scheme.AddKnownTypeWithName(internalGV.WithKind("Simple"), &InternalSimple{}) - scheme.AddKnownTypeWithName(externalGV.WithKind("Simple"), &ExternalSimple{}) + scheme.AddKnownTypeWithName(internalGV.WithKind("Simple"), &runtimetesting.InternalSimple{}) + scheme.AddKnownTypeWithName(externalGV.WithKind("Simple"), &runtimetesting.ExternalSimple{}) // If set, would clear TypeMeta during conversion. //scheme.AddIgnoredConversionType(&TypeMeta{}, &TypeMeta{}) @@ -65,13 +53,13 @@ func TestScheme(t *testing.T) { // Register functions to verify that scope.Meta() gets set correctly. err := scheme.AddConversionFuncs( - func(in *InternalSimple, out *ExternalSimple, scope conversion.Scope) error { + func(in *runtimetesting.InternalSimple, out *runtimetesting.ExternalSimple, scope conversion.Scope) error { scope.Convert(&in.TypeMeta, &out.TypeMeta, 0) scope.Convert(&in.TestString, &out.TestString, 0) internalToExternalCalls++ return nil }, - func(in *ExternalSimple, out *InternalSimple, scope conversion.Scope) error { + func(in *runtimetesting.ExternalSimple, out *runtimetesting.InternalSimple, scope conversion.Scope) error { scope.Convert(&in.TypeMeta, &out.TypeMeta, 0) scope.Convert(&in.TestString, &out.TestString, 0) externalToInternalCalls++ @@ -87,7 +75,7 @@ func TestScheme(t *testing.T) { info, _ := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), runtime.ContentTypeJSON) jsonserializer := info.Serializer - simple := &InternalSimple{ + simple := &runtimetesting.InternalSimple{ TestString: "foo", } @@ -102,14 +90,14 @@ func TestScheme(t *testing.T) { if err != nil { t.Fatal(err) } - if _, ok := obj2.(*InternalSimple); !ok { + if _, ok := obj2.(*runtimetesting.InternalSimple); !ok { t.Fatalf("Got wrong type") } if e, a := simple, 
obj2; !reflect.DeepEqual(e, a) { t.Errorf("Expected:\n %#v,\n Got:\n %#v", e, a) } - obj3 := &InternalSimple{} + obj3 := &runtimetesting.InternalSimple{} if err := runtime.DecodeInto(codec, data, obj3); err != nil { t.Fatal(err) } @@ -124,12 +112,12 @@ func TestScheme(t *testing.T) { if err != nil { t.Fatal(err) } - if _, ok := obj4.(*ExternalSimple); !ok { + if _, ok := obj4.(*runtimetesting.ExternalSimple); !ok { t.Fatalf("Got wrong type") } // Test Convert - external := &ExternalSimple{} + external := &runtimetesting.ExternalSimple{} err = scheme.Convert(simple, external, nil) if err != nil { t.Fatalf("Unexpected error: %v", err) @@ -168,50 +156,13 @@ func TestBadJSONRejection(t *testing.T) { }*/ } -type ExtensionA struct { - runtime.TypeMeta `json:",inline"` - TestString string `json:"testString"` -} - -type ExtensionB struct { - runtime.TypeMeta `json:",inline"` - TestString string `json:"testString"` -} - -type ExternalExtensionType struct { - runtime.TypeMeta `json:",inline"` - Extension runtime.RawExtension `json:"extension"` -} - -type InternalExtensionType struct { - runtime.TypeMeta `json:",inline"` - Extension runtime.Object `json:"extension"` -} - -type ExternalOptionalExtensionType struct { - runtime.TypeMeta `json:",inline"` - Extension runtime.RawExtension `json:"extension,omitempty"` -} - -type InternalOptionalExtensionType struct { - runtime.TypeMeta `json:",inline"` - Extension runtime.Object `json:"extension,omitempty"` -} - -func (obj *ExtensionA) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *ExtensionB) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *ExternalExtensionType) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *InternalExtensionType) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *ExternalOptionalExtensionType) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *InternalOptionalExtensionType) GetObjectKind() 
schema.ObjectKind { return &obj.TypeMeta } - func TestExternalToInternalMapping(t *testing.T) { internalGV := schema.GroupVersion{Group: "test.group", Version: runtime.APIVersionInternal} externalGV := schema.GroupVersion{Group: "test.group", Version: "testExternal"} scheme := runtime.NewScheme() - scheme.AddKnownTypeWithName(internalGV.WithKind("OptionalExtensionType"), &InternalOptionalExtensionType{}) - scheme.AddKnownTypeWithName(externalGV.WithKind("OptionalExtensionType"), &ExternalOptionalExtensionType{}) + scheme.AddKnownTypeWithName(internalGV.WithKind("OptionalExtensionType"), &runtimetesting.InternalOptionalExtensionType{}) + scheme.AddKnownTypeWithName(externalGV.WithKind("OptionalExtensionType"), &runtimetesting.ExternalOptionalExtensionType{}) codec := serializer.NewCodecFactory(scheme).LegacyCodec(externalGV) @@ -220,7 +171,7 @@ func TestExternalToInternalMapping(t *testing.T) { encoded string }{ { - &InternalOptionalExtensionType{Extension: nil}, + &runtimetesting.InternalOptionalExtensionType{Extension: nil}, `{"kind":"OptionalExtensionType","apiVersion":"` + externalGV.String() + `"}`, }, } @@ -240,17 +191,17 @@ func TestExtensionMapping(t *testing.T) { externalGV := schema.GroupVersion{Group: "test.group", Version: "testExternal"} scheme := runtime.NewScheme() - scheme.AddKnownTypeWithName(internalGV.WithKind("ExtensionType"), &InternalExtensionType{}) - scheme.AddKnownTypeWithName(internalGV.WithKind("OptionalExtensionType"), &InternalOptionalExtensionType{}) - scheme.AddKnownTypeWithName(externalGV.WithKind("ExtensionType"), &ExternalExtensionType{}) - scheme.AddKnownTypeWithName(externalGV.WithKind("OptionalExtensionType"), &ExternalOptionalExtensionType{}) + scheme.AddKnownTypeWithName(internalGV.WithKind("ExtensionType"), &runtimetesting.InternalExtensionType{}) + scheme.AddKnownTypeWithName(internalGV.WithKind("OptionalExtensionType"), &runtimetesting.InternalOptionalExtensionType{}) + 
scheme.AddKnownTypeWithName(externalGV.WithKind("ExtensionType"), &runtimetesting.ExternalExtensionType{}) + scheme.AddKnownTypeWithName(externalGV.WithKind("OptionalExtensionType"), &runtimetesting.ExternalOptionalExtensionType{}) // register external first when the object is the same in both schemes, so ObjectVersionAndKind reports the // external version. - scheme.AddKnownTypeWithName(externalGV.WithKind("A"), &ExtensionA{}) - scheme.AddKnownTypeWithName(externalGV.WithKind("B"), &ExtensionB{}) - scheme.AddKnownTypeWithName(internalGV.WithKind("A"), &ExtensionA{}) - scheme.AddKnownTypeWithName(internalGV.WithKind("B"), &ExtensionB{}) + scheme.AddKnownTypeWithName(externalGV.WithKind("A"), &runtimetesting.ExtensionA{}) + scheme.AddKnownTypeWithName(externalGV.WithKind("B"), &runtimetesting.ExtensionB{}) + scheme.AddKnownTypeWithName(internalGV.WithKind("A"), &runtimetesting.ExtensionA{}) + scheme.AddKnownTypeWithName(internalGV.WithKind("B"), &runtimetesting.ExtensionB{}) codec := serializer.NewCodecFactory(scheme).LegacyCodec(externalGV) @@ -260,10 +211,10 @@ func TestExtensionMapping(t *testing.T) { encoded string }{ { - &InternalExtensionType{ - Extension: runtime.NewEncodable(codec, &ExtensionA{TestString: "foo"}), + &runtimetesting.InternalExtensionType{ + Extension: runtime.NewEncodable(codec, &runtimetesting.ExtensionA{TestString: "foo"}), }, - &InternalExtensionType{ + &runtimetesting.InternalExtensionType{ Extension: &runtime.Unknown{ Raw: []byte(`{"apiVersion":"test.group/testExternal","kind":"A","testString":"foo"}`), ContentType: runtime.ContentTypeJSON, @@ -273,8 +224,8 @@ func TestExtensionMapping(t *testing.T) { `{"apiVersion":"` + externalGV.String() + `","kind":"ExtensionType","extension":{"apiVersion":"test.group/testExternal","kind":"A","testString":"foo"}} `, }, { - &InternalExtensionType{Extension: runtime.NewEncodable(codec, &ExtensionB{TestString: "bar"})}, - &InternalExtensionType{ + &runtimetesting.InternalExtensionType{Extension: 
runtime.NewEncodable(codec, &runtimetesting.ExtensionB{TestString: "bar"})}, + &runtimetesting.InternalExtensionType{ Extension: &runtime.Unknown{ Raw: []byte(`{"apiVersion":"test.group/testExternal","kind":"B","testString":"bar"}`), ContentType: runtime.ContentTypeJSON, @@ -284,8 +235,8 @@ func TestExtensionMapping(t *testing.T) { `{"apiVersion":"` + externalGV.String() + `","kind":"ExtensionType","extension":{"apiVersion":"test.group/testExternal","kind":"B","testString":"bar"}} `, }, { - &InternalExtensionType{Extension: nil}, - &InternalExtensionType{ + &runtimetesting.InternalExtensionType{Extension: nil}, + &runtimetesting.InternalExtensionType{ Extension: nil, }, `{"apiVersion":"` + externalGV.String() + `","kind":"ExtensionType","extension":null} @@ -315,12 +266,12 @@ func TestEncode(t *testing.T) { externalGV := schema.GroupVersion{Group: "test.group", Version: "testExternal"} scheme := runtime.NewScheme() - scheme.AddKnownTypeWithName(internalGV.WithKind("Simple"), &InternalSimple{}) - scheme.AddKnownTypeWithName(externalGV.WithKind("Simple"), &ExternalSimple{}) + scheme.AddKnownTypeWithName(internalGV.WithKind("Simple"), &runtimetesting.InternalSimple{}) + scheme.AddKnownTypeWithName(externalGV.WithKind("Simple"), &runtimetesting.ExternalSimple{}) codec := serializer.NewCodecFactory(scheme).LegacyCodec(externalGV) - test := &InternalSimple{ + test := &runtimetesting.InternalSimple{ TestString: "I'm the same", } obj := runtime.Object(test) @@ -329,7 +280,7 @@ func TestEncode(t *testing.T) { if err != nil || err2 != nil { t.Fatalf("Failure: '%v' '%v'", err, err2) } - if _, ok := obj2.(*InternalSimple); !ok { + if _, ok := obj2.(*runtimetesting.InternalSimple); !ok { t.Fatalf("Got wrong type") } if !reflect.DeepEqual(obj2, test) { @@ -346,18 +297,18 @@ func TestUnversionedTypes(t *testing.T) { otherGV := schema.GroupVersion{Group: "group", Version: "other"} scheme := runtime.NewScheme() - scheme.AddUnversionedTypes(externalGV, &InternalSimple{}) - 
scheme.AddKnownTypeWithName(internalGV.WithKind("Simple"), &InternalSimple{}) - scheme.AddKnownTypeWithName(externalGV.WithKind("Simple"), &ExternalSimple{}) - scheme.AddKnownTypeWithName(otherGV.WithKind("Simple"), &ExternalSimple{}) + scheme.AddUnversionedTypes(externalGV, &runtimetesting.InternalSimple{}) + scheme.AddKnownTypeWithName(internalGV.WithKind("Simple"), &runtimetesting.InternalSimple{}) + scheme.AddKnownTypeWithName(externalGV.WithKind("Simple"), &runtimetesting.ExternalSimple{}) + scheme.AddKnownTypeWithName(otherGV.WithKind("Simple"), &runtimetesting.ExternalSimple{}) codec := serializer.NewCodecFactory(scheme).LegacyCodec(externalGV) - if unv, ok := scheme.IsUnversioned(&InternalSimple{}); !unv || !ok { + if unv, ok := scheme.IsUnversioned(&runtimetesting.InternalSimple{}); !unv || !ok { t.Fatalf("type not unversioned and in scheme: %t %t", unv, ok) } - kinds, _, err := scheme.ObjectKinds(&InternalSimple{}) + kinds, _, err := scheme.ObjectKinds(&runtimetesting.InternalSimple{}) if err != nil { t.Fatal(err) } @@ -366,7 +317,7 @@ func TestUnversionedTypes(t *testing.T) { t.Fatalf("unexpected: %#v", kind) } - test := &InternalSimple{ + test := &runtimetesting.InternalSimple{ TestString: "I'm the same", } obj := runtime.Object(test) @@ -378,7 +329,7 @@ func TestUnversionedTypes(t *testing.T) { if err != nil { t.Fatal(err) } - if _, ok := obj2.(*InternalSimple); !ok { + if _, ok := obj2.(*runtimetesting.InternalSimple); !ok { t.Fatalf("Got wrong type") } if !reflect.DeepEqual(obj2, test) { @@ -400,107 +351,9 @@ func TestUnversionedTypes(t *testing.T) { } } -// Test a weird version/kind embedding format. 
-type MyWeirdCustomEmbeddedVersionKindField struct { - ID string `json:"ID,omitempty"` - APIVersion string `json:"myVersionKey,omitempty"` - ObjectKind string `json:"myKindKey,omitempty"` - Z string `json:"Z,omitempty"` - Y uint64 `json:"Y,omitempty"` -} - -type TestType1 struct { - MyWeirdCustomEmbeddedVersionKindField `json:",inline"` - A string `json:"A,omitempty"` - B int `json:"B,omitempty"` - C int8 `json:"C,omitempty"` - D int16 `json:"D,omitempty"` - E int32 `json:"E,omitempty"` - F int64 `json:"F,omitempty"` - G uint `json:"G,omitempty"` - H uint8 `json:"H,omitempty"` - I uint16 `json:"I,omitempty"` - J uint32 `json:"J,omitempty"` - K uint64 `json:"K,omitempty"` - L bool `json:"L,omitempty"` - M map[string]int `json:"M,omitempty"` - N map[string]TestType2 `json:"N,omitempty"` - O *TestType2 `json:"O,omitempty"` - P []TestType2 `json:"Q,omitempty"` -} - -type TestType2 struct { - A string `json:"A,omitempty"` - B int `json:"B,omitempty"` -} - -type ExternalTestType2 struct { - A string `json:"A,omitempty"` - B int `json:"B,omitempty"` -} -type ExternalTestType1 struct { - MyWeirdCustomEmbeddedVersionKindField `json:",inline"` - A string `json:"A,omitempty"` - B int `json:"B,omitempty"` - C int8 `json:"C,omitempty"` - D int16 `json:"D,omitempty"` - E int32 `json:"E,omitempty"` - F int64 `json:"F,omitempty"` - G uint `json:"G,omitempty"` - H uint8 `json:"H,omitempty"` - I uint16 `json:"I,omitempty"` - J uint32 `json:"J,omitempty"` - K uint64 `json:"K,omitempty"` - L bool `json:"L,omitempty"` - M map[string]int `json:"M,omitempty"` - N map[string]ExternalTestType2 `json:"N,omitempty"` - O *ExternalTestType2 `json:"O,omitempty"` - P []ExternalTestType2 `json:"Q,omitempty"` -} - -type ExternalInternalSame struct { - MyWeirdCustomEmbeddedVersionKindField `json:",inline"` - A TestType2 `json:"A,omitempty"` -} - -type UnversionedType struct { - MyWeirdCustomEmbeddedVersionKindField `json:",inline"` - A string `json:"A,omitempty"` -} - -type UnknownType struct { - 
MyWeirdCustomEmbeddedVersionKindField `json:",inline"` - A string `json:"A,omitempty"` -} - -func (obj *MyWeirdCustomEmbeddedVersionKindField) GetObjectKind() schema.ObjectKind { return obj } -func (obj *MyWeirdCustomEmbeddedVersionKindField) SetGroupVersionKind(gvk schema.GroupVersionKind) { - obj.APIVersion, obj.ObjectKind = gvk.ToAPIVersionAndKind() -} -func (obj *MyWeirdCustomEmbeddedVersionKindField) GroupVersionKind() schema.GroupVersionKind { - return schema.FromAPIVersionAndKind(obj.APIVersion, obj.ObjectKind) -} - -func (obj *ExternalInternalSame) GetObjectKind() schema.ObjectKind { - return &obj.MyWeirdCustomEmbeddedVersionKindField -} - -func (obj *TestType1) GetObjectKind() schema.ObjectKind { - return &obj.MyWeirdCustomEmbeddedVersionKindField -} - -func (obj *ExternalTestType1) GetObjectKind() schema.ObjectKind { - return &obj.MyWeirdCustomEmbeddedVersionKindField -} - -func (obj *TestType2) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } -func (obj *ExternalTestType2) GetObjectKind() schema.ObjectKind { - return schema.EmptyObjectKind -} - // TestObjectFuzzer can randomly populate all the above objects. var TestObjectFuzzer = fuzz.New().NilChance(.5).NumElements(1, 100).Funcs( - func(j *MyWeirdCustomEmbeddedVersionKindField, c fuzz.Continue) { + func(j *runtimetesting.MyWeirdCustomEmbeddedVersionKindField, c fuzz.Continue) { // We have to customize the randomization of MyWeirdCustomEmbeddedVersionKindFields because their // APIVersion and Kind must remain blank in memory. j.APIVersion = "" @@ -520,17 +373,17 @@ func GetTestScheme() *runtime.Scheme { // Ordinarily, we wouldn't add TestType2, but because this is a test and // both types are from the same package, we need to get it into the system // so that converter will match it with ExternalType2. 
- s.AddKnownTypes(internalGV, &TestType1{}, &TestType2{}, &ExternalInternalSame{}) - s.AddKnownTypes(externalGV, &ExternalInternalSame{}) - s.AddKnownTypeWithName(externalGV.WithKind("TestType1"), &ExternalTestType1{}) - s.AddKnownTypeWithName(externalGV.WithKind("TestType2"), &ExternalTestType2{}) - s.AddKnownTypeWithName(internalGV.WithKind("TestType3"), &TestType1{}) - s.AddKnownTypeWithName(externalGV.WithKind("TestType3"), &ExternalTestType1{}) - s.AddKnownTypeWithName(externalGV.WithKind("TestType4"), &ExternalTestType1{}) - s.AddKnownTypeWithName(alternateExternalGV.WithKind("TestType3"), &ExternalTestType1{}) - s.AddKnownTypeWithName(alternateExternalGV.WithKind("TestType5"), &ExternalTestType1{}) - s.AddKnownTypeWithName(differentExternalGV.WithKind("TestType1"), &ExternalTestType1{}) - s.AddUnversionedTypes(externalGV, &UnversionedType{}) + s.AddKnownTypes(internalGV, &runtimetesting.TestType1{}, &runtimetesting.TestType2{}, &runtimetesting.ExternalInternalSame{}) + s.AddKnownTypes(externalGV, &runtimetesting.ExternalInternalSame{}) + s.AddKnownTypeWithName(externalGV.WithKind("TestType1"), &runtimetesting.ExternalTestType1{}) + s.AddKnownTypeWithName(externalGV.WithKind("TestType2"), &runtimetesting.ExternalTestType2{}) + s.AddKnownTypeWithName(internalGV.WithKind("TestType3"), &runtimetesting.TestType1{}) + s.AddKnownTypeWithName(externalGV.WithKind("TestType3"), &runtimetesting.ExternalTestType1{}) + s.AddKnownTypeWithName(externalGV.WithKind("TestType4"), &runtimetesting.ExternalTestType1{}) + s.AddKnownTypeWithName(alternateExternalGV.WithKind("TestType3"), &runtimetesting.ExternalTestType1{}) + s.AddKnownTypeWithName(alternateExternalGV.WithKind("TestType5"), &runtimetesting.ExternalTestType1{}) + s.AddKnownTypeWithName(differentExternalGV.WithKind("TestType1"), &runtimetesting.ExternalTestType1{}) + s.AddUnversionedTypes(externalGV, &runtimetesting.UnversionedType{}) return s } @@ -552,8 +405,8 @@ func TestAddKnownTypesIdemPotent(t *testing.T) { s 
:= runtime.NewScheme() gv := schema.GroupVersion{Group: "foo", Version: "v1"} - s.AddKnownTypes(gv, &InternalSimple{}) - s.AddKnownTypes(gv, &InternalSimple{}) + s.AddKnownTypes(gv, &runtimetesting.InternalSimple{}) + s.AddKnownTypes(gv, &runtimetesting.InternalSimple{}) if len(s.KnownTypes(gv)) != 1 { t.Errorf("expected only one %v type after double registration", gv) } @@ -561,8 +414,8 @@ func TestAddKnownTypesIdemPotent(t *testing.T) { t.Errorf("expected only one type after double registration") } - s.AddKnownTypeWithName(gv.WithKind("InternalSimple"), &InternalSimple{}) - s.AddKnownTypeWithName(gv.WithKind("InternalSimple"), &InternalSimple{}) + s.AddKnownTypeWithName(gv.WithKind("InternalSimple"), &runtimetesting.InternalSimple{}) + s.AddKnownTypeWithName(gv.WithKind("InternalSimple"), &runtimetesting.InternalSimple{}) if len(s.KnownTypes(gv)) != 1 { t.Errorf("expected only one %v type after double registration with custom name", gv) } @@ -570,8 +423,8 @@ func TestAddKnownTypesIdemPotent(t *testing.T) { t.Errorf("expected only one type after double registration with custom name") } - s.AddUnversionedTypes(gv, &InternalSimple{}) - s.AddUnversionedTypes(gv, &InternalSimple{}) + s.AddUnversionedTypes(gv, &runtimetesting.InternalSimple{}) + s.AddUnversionedTypes(gv, &runtimetesting.InternalSimple{}) if len(s.KnownTypes(gv)) != 1 { t.Errorf("expected only one %v type after double registration with custom name", gv) } @@ -579,7 +432,7 @@ func TestAddKnownTypesIdemPotent(t *testing.T) { t.Errorf("expected only one type after double registration with custom name") } - kinds, _, err := s.ObjectKinds(&InternalSimple{}) + kinds, _, err := s.ObjectKinds(&runtimetesting.InternalSimple{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -588,10 +441,13 @@ func TestAddKnownTypesIdemPotent(t *testing.T) { } } -// EmbeddableTypeMeta passes GetObjectKind to the type which embeds it. 
-type EmbeddableTypeMeta runtime.TypeMeta +// redefine InternalSimple with the same name, but obviously as a different type than in runtimetesting +type InternalSimple struct { + runtime.TypeMeta `json:",inline"` + TestString string `json:"testString"` +} -func (tm *EmbeddableTypeMeta) GetObjectKind() schema.ObjectKind { return (*runtime.TypeMeta)(tm) } +func (s *InternalSimple) DeepCopyObject() runtime.Object { return nil } func TestConflictingAddKnownTypes(t *testing.T) { s := runtime.NewScheme() @@ -604,8 +460,8 @@ func TestConflictingAddKnownTypes(t *testing.T) { panicked <- true } }() - s.AddKnownTypeWithName(gv.WithKind("InternalSimple"), &InternalSimple{}) - s.AddKnownTypeWithName(gv.WithKind("InternalSimple"), &ExternalSimple{}) + s.AddKnownTypeWithName(gv.WithKind("InternalSimple"), &runtimetesting.InternalSimple{}) + s.AddKnownTypeWithName(gv.WithKind("InternalSimple"), &runtimetesting.ExternalSimple{}) panicked <- false }() if !<-panicked { @@ -619,13 +475,7 @@ func TestConflictingAddKnownTypes(t *testing.T) { } }() - s.AddUnversionedTypes(gv, &InternalSimple{}) - - // redefine InternalSimple with the same name, but obviously as a different type - type InternalSimple struct { - EmbeddableTypeMeta `json:",inline"` - TestString string `json:"testString"` - } + s.AddUnversionedTypes(gv, &runtimetesting.InternalSimple{}) s.AddUnversionedTypes(gv, &InternalSimple{}) panicked <- false }() @@ -636,12 +486,12 @@ func TestConflictingAddKnownTypes(t *testing.T) { func TestConvertToVersionBasic(t *testing.T) { s := GetTestScheme() - tt := &TestType1{A: "I'm not a pointer object"} + tt := &runtimetesting.TestType1{A: "I'm not a pointer object"} other, err := s.ConvertToVersion(tt, schema.GroupVersion{Version: "v1"}) if err != nil { t.Fatalf("Failure: %v", err) } - converted, ok := other.(*ExternalTestType1) + converted, ok := other.(*runtimetesting.ExternalTestType1) if !ok { t.Fatalf("Got wrong type: %T", other) } @@ -671,13 +521,13 @@ func TestConvertToVersion(t 
*testing.T) { // errors if the type is not registered in the scheme { scheme: GetTestScheme(), - in: &UnknownType{}, + in: &runtimetesting.UnknownType{}, errFn: func(err error) bool { return err != nil && runtime.IsNotRegisteredError(err) }, }, // errors if the group versioner returns no target { scheme: GetTestScheme(), - in: &ExternalTestType1{A: "test"}, + in: &runtimetesting.ExternalTestType1{A: "test"}, gv: testGroupVersioner{}, errFn: func(err error) bool { return err != nil && strings.Contains(err.Error(), "is not suitable for converting") @@ -686,132 +536,132 @@ func TestConvertToVersion(t *testing.T) { // converts to internal { scheme: GetTestScheme(), - in: &ExternalTestType1{A: "test"}, + in: &runtimetesting.ExternalTestType1{A: "test"}, gv: schema.GroupVersion{Version: "__internal"}, - out: &TestType1{A: "test"}, + out: &runtimetesting.TestType1{A: "test"}, }, // prefers the best match { scheme: GetTestScheme(), - in: &ExternalTestType1{A: "test"}, + in: &runtimetesting.ExternalTestType1{A: "test"}, gv: schema.GroupVersions{{Version: "__internal"}, {Version: "v1"}}, - out: &ExternalTestType1{ - MyWeirdCustomEmbeddedVersionKindField: MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "TestType1"}, + out: &runtimetesting.ExternalTestType1{ + MyWeirdCustomEmbeddedVersionKindField: runtimetesting.MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "TestType1"}, A: "test", }, }, // unversioned type returned as-is { scheme: GetTestScheme(), - in: &UnversionedType{A: "test"}, + in: &runtimetesting.UnversionedType{A: "test"}, gv: schema.GroupVersions{{Version: "v1"}}, same: true, - out: &UnversionedType{ - MyWeirdCustomEmbeddedVersionKindField: MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "UnversionedType"}, + out: &runtimetesting.UnversionedType{ + MyWeirdCustomEmbeddedVersionKindField: runtimetesting.MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "UnversionedType"}, A: "test", }, }, 
// unversioned type returned when not included in the target types { scheme: GetTestScheme(), - in: &UnversionedType{A: "test"}, + in: &runtimetesting.UnversionedType{A: "test"}, gv: schema.GroupVersions{{Group: "other", Version: "v2"}}, same: true, - out: &UnversionedType{ - MyWeirdCustomEmbeddedVersionKindField: MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "UnversionedType"}, + out: &runtimetesting.UnversionedType{ + MyWeirdCustomEmbeddedVersionKindField: runtimetesting.MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "UnversionedType"}, A: "test", }, }, // detected as already being in the target version { scheme: GetTestScheme(), - in: &ExternalTestType1{A: "test"}, + in: &runtimetesting.ExternalTestType1{A: "test"}, gv: schema.GroupVersions{{Version: "v1"}}, same: true, - out: &ExternalTestType1{ - MyWeirdCustomEmbeddedVersionKindField: MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "TestType1"}, + out: &runtimetesting.ExternalTestType1{ + MyWeirdCustomEmbeddedVersionKindField: runtimetesting.MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "TestType1"}, A: "test", }, }, // detected as already being in the first target version { scheme: GetTestScheme(), - in: &ExternalTestType1{A: "test"}, + in: &runtimetesting.ExternalTestType1{A: "test"}, gv: schema.GroupVersions{{Version: "v1"}, {Version: "__internal"}}, same: true, - out: &ExternalTestType1{ - MyWeirdCustomEmbeddedVersionKindField: MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "TestType1"}, + out: &runtimetesting.ExternalTestType1{ + MyWeirdCustomEmbeddedVersionKindField: runtimetesting.MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "TestType1"}, A: "test", }, }, // detected as already being in the first target version { scheme: GetTestScheme(), - in: &ExternalTestType1{A: "test"}, + in: &runtimetesting.ExternalTestType1{A: "test"}, gv: schema.GroupVersions{{Version: "v1"}, {Version: 
"__internal"}}, same: true, - out: &ExternalTestType1{ - MyWeirdCustomEmbeddedVersionKindField: MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "TestType1"}, + out: &runtimetesting.ExternalTestType1{ + MyWeirdCustomEmbeddedVersionKindField: runtimetesting.MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "TestType1"}, A: "test", }, }, // the external type is registered in multiple groups, versions, and kinds, and can be targeted to all of them (1/3): different kind { scheme: GetTestScheme(), - in: &ExternalTestType1{A: "test"}, + in: &runtimetesting.ExternalTestType1{A: "test"}, gv: testGroupVersioner{ok: true, target: schema.GroupVersionKind{Kind: "TestType3", Version: "v1"}}, same: true, - out: &ExternalTestType1{ - MyWeirdCustomEmbeddedVersionKindField: MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "TestType3"}, + out: &runtimetesting.ExternalTestType1{ + MyWeirdCustomEmbeddedVersionKindField: runtimetesting.MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "TestType3"}, A: "test", }, }, // the external type is registered in multiple groups, versions, and kinds, and can be targeted to all of them (2/3): different gv { scheme: GetTestScheme(), - in: &ExternalTestType1{A: "test"}, + in: &runtimetesting.ExternalTestType1{A: "test"}, gv: testGroupVersioner{ok: true, target: schema.GroupVersionKind{Kind: "TestType3", Group: "custom", Version: "v1"}}, same: true, - out: &ExternalTestType1{ - MyWeirdCustomEmbeddedVersionKindField: MyWeirdCustomEmbeddedVersionKindField{APIVersion: "custom/v1", ObjectKind: "TestType3"}, + out: &runtimetesting.ExternalTestType1{ + MyWeirdCustomEmbeddedVersionKindField: runtimetesting.MyWeirdCustomEmbeddedVersionKindField{APIVersion: "custom/v1", ObjectKind: "TestType3"}, A: "test", }, }, // the external type is registered in multiple groups, versions, and kinds, and can be targeted to all of them (3/3): different gvk { scheme: GetTestScheme(), - in: 
&ExternalTestType1{A: "test"}, + in: &runtimetesting.ExternalTestType1{A: "test"}, gv: testGroupVersioner{ok: true, target: schema.GroupVersionKind{Group: "custom", Version: "v1", Kind: "TestType5"}}, same: true, - out: &ExternalTestType1{ - MyWeirdCustomEmbeddedVersionKindField: MyWeirdCustomEmbeddedVersionKindField{APIVersion: "custom/v1", ObjectKind: "TestType5"}, + out: &runtimetesting.ExternalTestType1{ + MyWeirdCustomEmbeddedVersionKindField: runtimetesting.MyWeirdCustomEmbeddedVersionKindField{APIVersion: "custom/v1", ObjectKind: "TestType5"}, A: "test", }, }, // multi group versioner recognizes multiple groups and forces the output to a particular version, copies because version differs { scheme: GetTestScheme(), - in: &ExternalTestType1{A: "test"}, + in: &runtimetesting.ExternalTestType1{A: "test"}, gv: runtime.NewMultiGroupVersioner(schema.GroupVersion{Group: "other", Version: "v2"}, schema.GroupKind{Group: "custom", Kind: "TestType3"}, schema.GroupKind{Kind: "TestType1"}), - out: &ExternalTestType1{ - MyWeirdCustomEmbeddedVersionKindField: MyWeirdCustomEmbeddedVersionKindField{APIVersion: "other/v2", ObjectKind: "TestType1"}, + out: &runtimetesting.ExternalTestType1{ + MyWeirdCustomEmbeddedVersionKindField: runtimetesting.MyWeirdCustomEmbeddedVersionKindField{APIVersion: "other/v2", ObjectKind: "TestType1"}, A: "test", }, }, // multi group versioner recognizes multiple groups and forces the output to a particular version, copies because version differs { scheme: GetTestScheme(), - in: &ExternalTestType1{A: "test"}, + in: &runtimetesting.ExternalTestType1{A: "test"}, gv: runtime.NewMultiGroupVersioner(schema.GroupVersion{Group: "other", Version: "v2"}, schema.GroupKind{Kind: "TestType1"}, schema.GroupKind{Group: "custom", Kind: "TestType3"}), - out: &ExternalTestType1{ - MyWeirdCustomEmbeddedVersionKindField: MyWeirdCustomEmbeddedVersionKindField{APIVersion: "other/v2", ObjectKind: "TestType1"}, + out: &runtimetesting.ExternalTestType1{ + 
MyWeirdCustomEmbeddedVersionKindField: runtimetesting.MyWeirdCustomEmbeddedVersionKindField{APIVersion: "other/v2", ObjectKind: "TestType1"}, A: "test", }, }, // multi group versioner is unable to find a match when kind AND group don't match (there is no TestType1 kind in group "other", and no kind "TestType5" in the default group) { scheme: GetTestScheme(), - in: &TestType1{A: "test"}, + in: &runtimetesting.TestType1{A: "test"}, gv: runtime.NewMultiGroupVersioner(schema.GroupVersion{Group: "custom", Version: "v1"}, schema.GroupKind{Group: "other"}, schema.GroupKind{Kind: "TestType5"}), errFn: func(err error) bool { return err != nil && strings.Contains(err.Error(), "is not suitable for converting") @@ -820,42 +670,42 @@ func TestConvertToVersion(t *testing.T) { // multi group versioner recognizes multiple groups and forces the output to a particular version, performs no copy { scheme: GetTestScheme(), - in: &ExternalTestType1{A: "test"}, + in: &runtimetesting.ExternalTestType1{A: "test"}, gv: runtime.NewMultiGroupVersioner(schema.GroupVersion{Group: "", Version: "v1"}, schema.GroupKind{Group: "custom", Kind: "TestType3"}, schema.GroupKind{Kind: "TestType1"}), same: true, - out: &ExternalTestType1{ - MyWeirdCustomEmbeddedVersionKindField: MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "TestType1"}, + out: &runtimetesting.ExternalTestType1{ + MyWeirdCustomEmbeddedVersionKindField: runtimetesting.MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "TestType1"}, A: "test", }, }, // multi group versioner recognizes multiple groups and forces the output to a particular version, performs no copy { scheme: GetTestScheme(), - in: &ExternalTestType1{A: "test"}, + in: &runtimetesting.ExternalTestType1{A: "test"}, gv: runtime.NewMultiGroupVersioner(schema.GroupVersion{Group: "", Version: "v1"}, schema.GroupKind{Kind: "TestType1"}, schema.GroupKind{Group: "custom", Kind: "TestType3"}), same: true, - out: &ExternalTestType1{ - 
MyWeirdCustomEmbeddedVersionKindField: MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "TestType1"}, + out: &runtimetesting.ExternalTestType1{ + MyWeirdCustomEmbeddedVersionKindField: runtimetesting.MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "TestType1"}, A: "test", }, }, // group versioner can choose a particular target kind for a given input when kind is the same across group versions { scheme: GetTestScheme(), - in: &TestType1{A: "test"}, + in: &runtimetesting.TestType1{A: "test"}, gv: testGroupVersioner{ok: true, target: schema.GroupVersionKind{Version: "v1", Kind: "TestType3"}}, - out: &ExternalTestType1{ - MyWeirdCustomEmbeddedVersionKindField: MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "TestType3"}, + out: &runtimetesting.ExternalTestType1{ + MyWeirdCustomEmbeddedVersionKindField: runtimetesting.MyWeirdCustomEmbeddedVersionKindField{APIVersion: "v1", ObjectKind: "TestType3"}, A: "test", }, }, // group versioner can choose a different kind { scheme: GetTestScheme(), - in: &TestType1{A: "test"}, + in: &runtimetesting.TestType1{A: "test"}, gv: testGroupVersioner{ok: true, target: schema.GroupVersionKind{Kind: "TestType5", Group: "custom", Version: "v1"}}, - out: &ExternalTestType1{ - MyWeirdCustomEmbeddedVersionKindField: MyWeirdCustomEmbeddedVersionKindField{APIVersion: "custom/v1", ObjectKind: "TestType5"}, + out: &runtimetesting.ExternalTestType1{ + MyWeirdCustomEmbeddedVersionKindField: runtimetesting.MyWeirdCustomEmbeddedVersionKindField{APIVersion: "custom/v1", ObjectKind: "TestType5"}, A: "test", }, }, @@ -914,21 +764,21 @@ func TestMetaValues(t *testing.T) { externalGV := schema.GroupVersion{Group: "test.group", Version: "externalVersion"} s := runtime.NewScheme() - s.AddKnownTypeWithName(internalGV.WithKind("Simple"), &InternalSimple{}) - s.AddKnownTypeWithName(externalGV.WithKind("Simple"), &ExternalSimple{}) + s.AddKnownTypeWithName(internalGV.WithKind("Simple"), 
&runtimetesting.InternalSimple{}) + s.AddKnownTypeWithName(externalGV.WithKind("Simple"), &runtimetesting.ExternalSimple{}) internalToExternalCalls := 0 externalToInternalCalls := 0 // Register functions to verify that scope.Meta() gets set correctly. err := s.AddConversionFuncs( - func(in *InternalSimple, out *ExternalSimple, scope conversion.Scope) error { + func(in *runtimetesting.InternalSimple, out *runtimetesting.ExternalSimple, scope conversion.Scope) error { t.Logf("internal -> external") scope.Convert(&in.TestString, &out.TestString, 0) internalToExternalCalls++ return nil }, - func(in *ExternalSimple, out *InternalSimple, scope conversion.Scope) error { + func(in *runtimetesting.ExternalSimple, out *runtimetesting.InternalSimple, scope conversion.Scope) error { t.Logf("external -> internal") scope.Convert(&in.TestString, &out.TestString, 0) externalToInternalCalls++ @@ -938,7 +788,7 @@ func TestMetaValues(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %v", err) } - simple := &InternalSimple{ + simple := &runtimetesting.InternalSimple{ TestString: "foo", } diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/BUILD b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/BUILD index d27fbabb8b9..2f5fbf74674 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/BUILD @@ -21,6 +21,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/testing:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/codec_test.go b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/codec_test.go index d3b87f49d51..d27da113a96 100644 --- 
a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/codec_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/codec_test.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + serializertesting "k8s.io/apimachinery/pkg/runtime/serializer/testing" "k8s.io/apimachinery/pkg/util/diff" "github.com/ghodss/yaml" @@ -57,103 +58,15 @@ func (testMetaFactory) Interpret(data []byte) (*schema.GroupVersionKind, error) return &schema.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: findKind.ObjectKind}, nil } -// Test a weird version/kind embedding format. -type MyWeirdCustomEmbeddedVersionKindField struct { - ID string `json:"ID,omitempty"` - APIVersion string `json:"myVersionKey,omitempty"` - ObjectKind string `json:"myKindKey,omitempty"` - Z string `json:"Z,omitempty"` - Y uint64 `json:"Y,omitempty"` -} - -type TestType1 struct { - MyWeirdCustomEmbeddedVersionKindField `json:",inline"` - A string `json:"A,omitempty"` - B int `json:"B,omitempty"` - C int8 `json:"C,omitempty"` - D int16 `json:"D,omitempty"` - E int32 `json:"E,omitempty"` - F int64 `json:"F,omitempty"` - G uint `json:"G,omitempty"` - H uint8 `json:"H,omitempty"` - I uint16 `json:"I,omitempty"` - J uint32 `json:"J,omitempty"` - K uint64 `json:"K,omitempty"` - L bool `json:"L,omitempty"` - M map[string]int `json:"M,omitempty"` - N map[string]TestType2 `json:"N,omitempty"` - O *TestType2 `json:"O,omitempty"` - P []TestType2 `json:"Q,omitempty"` -} - -type TestType2 struct { - A string `json:"A,omitempty"` - B int `json:"B,omitempty"` -} - -type ExternalTestType2 struct { - A string `json:"A,omitempty"` - B int `json:"B,omitempty"` -} -type ExternalTestType1 struct { - MyWeirdCustomEmbeddedVersionKindField `json:",inline"` - A string `json:"A,omitempty"` - B int `json:"B,omitempty"` - C int8 `json:"C,omitempty"` - D int16 `json:"D,omitempty"` - E int32 `json:"E,omitempty"` - F int64 
`json:"F,omitempty"` - G uint `json:"G,omitempty"` - H uint8 `json:"H,omitempty"` - I uint16 `json:"I,omitempty"` - J uint32 `json:"J,omitempty"` - K uint64 `json:"K,omitempty"` - L bool `json:"L,omitempty"` - M map[string]int `json:"M,omitempty"` - N map[string]ExternalTestType2 `json:"N,omitempty"` - O *ExternalTestType2 `json:"O,omitempty"` - P []ExternalTestType2 `json:"Q,omitempty"` -} - -type ExternalInternalSame struct { - MyWeirdCustomEmbeddedVersionKindField `json:",inline"` - A TestType2 `json:"A,omitempty"` -} - // TestObjectFuzzer can randomly populate all the above objects. var TestObjectFuzzer = fuzz.New().NilChance(.5).NumElements(1, 100).Funcs( - func(j *MyWeirdCustomEmbeddedVersionKindField, c fuzz.Continue) { + func(j *serializertesting.MyWeirdCustomEmbeddedVersionKindField, c fuzz.Continue) { c.FuzzNoCustom(j) j.APIVersion = "" j.ObjectKind = "" }, ) -func (obj *MyWeirdCustomEmbeddedVersionKindField) GetObjectKind() schema.ObjectKind { return obj } -func (obj *MyWeirdCustomEmbeddedVersionKindField) SetGroupVersionKind(gvk schema.GroupVersionKind) { - obj.APIVersion, obj.ObjectKind = gvk.ToAPIVersionAndKind() -} -func (obj *MyWeirdCustomEmbeddedVersionKindField) GroupVersionKind() schema.GroupVersionKind { - return schema.FromAPIVersionAndKind(obj.APIVersion, obj.ObjectKind) -} - -func (obj *ExternalInternalSame) GetObjectKind() schema.ObjectKind { - return &obj.MyWeirdCustomEmbeddedVersionKindField -} - -func (obj *TestType1) GetObjectKind() schema.ObjectKind { - return &obj.MyWeirdCustomEmbeddedVersionKindField -} - -func (obj *ExternalTestType1) GetObjectKind() schema.ObjectKind { - return &obj.MyWeirdCustomEmbeddedVersionKindField -} - -func (obj *TestType2) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } -func (obj *ExternalTestType2) GetObjectKind() schema.ObjectKind { - return schema.EmptyObjectKind -} - // Returns a new Scheme set up with the test objects. 
func GetTestScheme() (*runtime.Scheme, runtime.Codec) { internalGV := schema.GroupVersion{Version: runtime.APIVersionInternal} @@ -164,13 +77,13 @@ func GetTestScheme() (*runtime.Scheme, runtime.Codec) { // Ordinarily, we wouldn't add TestType2, but because this is a test and // both types are from the same package, we need to get it into the system // so that converter will match it with ExternalType2. - s.AddKnownTypes(internalGV, &TestType1{}, &TestType2{}, &ExternalInternalSame{}) - s.AddKnownTypes(externalGV, &ExternalInternalSame{}) - s.AddKnownTypeWithName(externalGV.WithKind("TestType1"), &ExternalTestType1{}) - s.AddKnownTypeWithName(externalGV.WithKind("TestType2"), &ExternalTestType2{}) - s.AddKnownTypeWithName(internalGV.WithKind("TestType3"), &TestType1{}) - s.AddKnownTypeWithName(externalGV.WithKind("TestType3"), &ExternalTestType1{}) - s.AddKnownTypeWithName(externalGV2.WithKind("TestType1"), &ExternalTestType1{}) + s.AddKnownTypes(internalGV, &serializertesting.TestType1{}, &serializertesting.TestType2{}, &serializertesting.ExternalInternalSame{}) + s.AddKnownTypes(externalGV, &serializertesting.ExternalInternalSame{}) + s.AddKnownTypeWithName(externalGV.WithKind("TestType1"), &serializertesting.ExternalTestType1{}) + s.AddKnownTypeWithName(externalGV.WithKind("TestType2"), &serializertesting.ExternalTestType2{}) + s.AddKnownTypeWithName(internalGV.WithKind("TestType3"), &serializertesting.TestType1{}) + s.AddKnownTypeWithName(externalGV.WithKind("TestType3"), &serializertesting.ExternalTestType1{}) + s.AddKnownTypeWithName(externalGV2.WithKind("TestType1"), &serializertesting.ExternalTestType1{}) s.AddUnversionedTypes(externalGV, &metav1.Status{}) @@ -180,7 +93,7 @@ func GetTestScheme() (*runtime.Scheme, runtime.Codec) { } var semantic = conversion.EqualitiesOrDie( - func(a, b MyWeirdCustomEmbeddedVersionKindField) bool { + func(a, b serializertesting.MyWeirdCustomEmbeddedVersionKindField) bool { a.APIVersion, a.ObjectKind = "", "" b.APIVersion, 
b.ObjectKind = "", "" return a == b @@ -219,8 +132,8 @@ func runTest(t *testing.T, source interface{}) { func TestTypes(t *testing.T) { table := []interface{}{ - &TestType1{}, - &ExternalInternalSame{}, + &serializertesting.TestType1{}, + &serializertesting.ExternalInternalSame{}, } for _, item := range table { // Try a few times, since runTest uses random values. @@ -237,7 +150,7 @@ func TestVersionedEncoding(t *testing.T) { encoder := info.Serializer codec := cf.CodecForVersions(encoder, nil, schema.GroupVersion{Version: "v2"}, nil) - out, err := runtime.Encode(codec, &TestType1{}) + out, err := runtime.Encode(codec, &serializertesting.TestType1{}) if err != nil { t.Fatal(err) } @@ -246,14 +159,14 @@ func TestVersionedEncoding(t *testing.T) { } codec = cf.CodecForVersions(encoder, nil, schema.GroupVersion{Version: "v3"}, nil) - _, err = runtime.Encode(codec, &TestType1{}) + _, err = runtime.Encode(codec, &serializertesting.TestType1{}) if err == nil { t.Fatal(err) } // unversioned encode with no versions is written directly to wire codec = cf.CodecForVersions(encoder, nil, runtime.InternalGroupVersioner, nil) - out, err = runtime.Encode(codec, &TestType1{}) + out, err = runtime.Encode(codec, &serializertesting.TestType1{}) if err != nil { t.Fatal(err) } @@ -269,7 +182,7 @@ func TestMultipleNames(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %v", err) } - internal := obj.(*TestType1) + internal := obj.(*serializertesting.TestType1) if internal.A != "value" { t.Fatalf("unexpected decoded object: %#v", internal) } @@ -289,13 +202,13 @@ func TestConvertTypesWhenDefaultNamesMatch(t *testing.T) { s := runtime.NewScheme() // create two names internally, with TestType1 being preferred - s.AddKnownTypeWithName(internalGV.WithKind("TestType1"), &TestType1{}) - s.AddKnownTypeWithName(internalGV.WithKind("OtherType1"), &TestType1{}) + s.AddKnownTypeWithName(internalGV.WithKind("TestType1"), &serializertesting.TestType1{}) + 
s.AddKnownTypeWithName(internalGV.WithKind("OtherType1"), &serializertesting.TestType1{}) // create two names externally, with TestType1 being preferred - s.AddKnownTypeWithName(externalGV.WithKind("TestType1"), &ExternalTestType1{}) - s.AddKnownTypeWithName(externalGV.WithKind("OtherType1"), &ExternalTestType1{}) + s.AddKnownTypeWithName(externalGV.WithKind("TestType1"), &serializertesting.ExternalTestType1{}) + s.AddKnownTypeWithName(externalGV.WithKind("OtherType1"), &serializertesting.ExternalTestType1{}) - ext := &ExternalTestType1{} + ext := &serializertesting.ExternalTestType1{} ext.APIVersion = "v1" ext.ObjectKind = "OtherType1" ext.A = "test" @@ -303,7 +216,7 @@ func TestConvertTypesWhenDefaultNamesMatch(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %v", err) } - expect := &TestType1{A: "test"} + expect := &serializertesting.TestType1{A: "test"} codec := newCodecFactory(s, newSerializersForScheme(s, testMetaFactory{})).LegacyCodec(schema.GroupVersion{Version: "v1"}) @@ -315,7 +228,7 @@ func TestConvertTypesWhenDefaultNamesMatch(t *testing.T) { t.Errorf("unexpected object: %#v", obj) } - into := &TestType1{} + into := &serializertesting.TestType1{} if err := runtime.DecodeInto(codec, data, into); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -326,13 +239,13 @@ func TestConvertTypesWhenDefaultNamesMatch(t *testing.T) { func TestEncode_Ptr(t *testing.T) { _, codec := GetTestScheme() - tt := &TestType1{A: "I am a pointer object"} + tt := &serializertesting.TestType1{A: "I am a pointer object"} data, err := runtime.Encode(codec, tt) obj2, err2 := runtime.Decode(codec, data) if err != nil || err2 != nil { t.Fatalf("Failure: '%v' '%v'\n%s", err, err2, data) } - if _, ok := obj2.(*TestType1); !ok { + if _, ok := obj2.(*serializertesting.TestType1); !ok { t.Fatalf("Got wrong type") } if !semantic.DeepEqual(obj2, tt) { @@ -355,10 +268,10 @@ func TestBadJSONRejection(t *testing.T) { } } badJSONKindMismatch := 
[]byte(`{"myVersionKey":"v1","myKindKey":"ExternalInternalSame"}`) - if err := runtime.DecodeInto(codec, badJSONKindMismatch, &TestType1{}); err == nil { + if err := runtime.DecodeInto(codec, badJSONKindMismatch, &serializertesting.TestType1{}); err == nil { t.Errorf("Kind is set but doesn't match the object type: %s", badJSONKindMismatch) } - if err := runtime.DecodeInto(codec, []byte(``), &TestType1{}); err != nil { + if err := runtime.DecodeInto(codec, []byte(``), &serializertesting.TestType1{}); err != nil { t.Errorf("Should allow empty decode: %v", err) } if _, _, err := codec.Decode([]byte(``), &schema.GroupVersionKind{Kind: "ExternalInternalSame"}, nil); err == nil { @@ -387,8 +300,8 @@ func GetDirectCodecTestScheme() *runtime.Scheme { // Ordinarily, we wouldn't add TestType2, but because this is a test and // both types are from the same package, we need to get it into the system // so that converter will match it with ExternalType2. - s.AddKnownTypes(internalGV, &TestType1{}) - s.AddKnownTypes(externalGV, &ExternalTestType1{}) + s.AddKnownTypes(internalGV, &serializertesting.TestType1{}) + s.AddKnownTypes(externalGV, &serializertesting.ExternalTestType1{}) s.AddUnversionedTypes(externalGV, &metav1.Status{}) return s @@ -406,7 +319,7 @@ func TestDirectCodec(t *testing.T) { } directEncoder := df.EncoderForVersion(serializer, ignoredGV) directDecoder := df.DecoderToVersion(serializer, ignoredGV) - out, err := runtime.Encode(directEncoder, &ExternalTestType1{}) + out, err := runtime.Encode(directEncoder, &serializertesting.ExternalTestType1{}) if err != nil { t.Fatal(err) } @@ -414,8 +327,8 @@ func TestDirectCodec(t *testing.T) { t.Fatal(string(out)) } a, _, err := directDecoder.Decode(out, nil, nil) - e := &ExternalTestType1{ - MyWeirdCustomEmbeddedVersionKindField: MyWeirdCustomEmbeddedVersionKindField{ + e := &serializertesting.ExternalTestType1{ + MyWeirdCustomEmbeddedVersionKindField: serializertesting.MyWeirdCustomEmbeddedVersionKindField{ APIVersion: 
"v1", ObjectKind: "ExternalTestType1", }, diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing/BUILD b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing/BUILD new file mode 100644 index 00000000000..12910cd16b2 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing/BUILD @@ -0,0 +1,22 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "types.go", + "zz_generated.deepcopy.go", + ], + tags = ["automanaged"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) diff --git a/pkg/kubelet/dockershim/securitycontext/doc.go b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing/doc.go similarity index 77% rename from pkg/kubelet/dockershim/securitycontext/doc.go rename to staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing/doc.go index dd9a0a2291d..22968b17e42 100644 --- a/pkg/kubelet/dockershim/securitycontext/doc.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing/doc.go @@ -14,5 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package securitycontext contains security context api implementations -package securitycontext // import "k8s.io/kubernetes/pkg/kubelet/dockershim/securitycontext" +// +k8s:deepcopy-gen=package + +package testing diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing/types.go b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing/types.go new file mode 100644 index 00000000000..f6eb069903c --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing/types.go @@ -0,0 +1,109 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Test a weird version/kind embedding format. +// +k8s:deepcopy-gen=false +type MyWeirdCustomEmbeddedVersionKindField struct { + ID string `json:"ID,omitempty"` + APIVersion string `json:"myVersionKey,omitempty"` + ObjectKind string `json:"myKindKey,omitempty"` + Z string `json:"Z,omitempty"` + Y uint64 `json:"Y,omitempty"` +} + +type TestType1 struct { + MyWeirdCustomEmbeddedVersionKindField `json:",inline"` + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` + C int8 `json:"C,omitempty"` + D int16 `json:"D,omitempty"` + E int32 `json:"E,omitempty"` + F int64 `json:"F,omitempty"` + G uint `json:"G,omitempty"` + H uint8 `json:"H,omitempty"` + I uint16 `json:"I,omitempty"` + J uint32 `json:"J,omitempty"` + K uint64 `json:"K,omitempty"` + L bool `json:"L,omitempty"` + M map[string]int `json:"M,omitempty"` + N map[string]TestType2 `json:"N,omitempty"` + O *TestType2 `json:"O,omitempty"` + P []TestType2 `json:"Q,omitempty"` +} + +type TestType2 struct { + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` +} + +type ExternalTestType2 struct { + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` +} + +type ExternalTestType1 struct { + MyWeirdCustomEmbeddedVersionKindField `json:",inline"` + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` + C int8 `json:"C,omitempty"` + D int16 `json:"D,omitempty"` + E int32 `json:"E,omitempty"` 
+ F int64 `json:"F,omitempty"` + G uint `json:"G,omitempty"` + H uint8 `json:"H,omitempty"` + I uint16 `json:"I,omitempty"` + J uint32 `json:"J,omitempty"` + K uint64 `json:"K,omitempty"` + L bool `json:"L,omitempty"` + M map[string]int `json:"M,omitempty"` + N map[string]ExternalTestType2 `json:"N,omitempty"` + O *ExternalTestType2 `json:"O,omitempty"` + P []ExternalTestType2 `json:"Q,omitempty"` +} + +type ExternalInternalSame struct { + MyWeirdCustomEmbeddedVersionKindField `json:",inline"` + A TestType2 `json:"A,omitempty"` +} + +func (obj *MyWeirdCustomEmbeddedVersionKindField) GetObjectKind() schema.ObjectKind { return obj } +func (obj *MyWeirdCustomEmbeddedVersionKindField) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.ObjectKind = gvk.ToAPIVersionAndKind() +} +func (obj *MyWeirdCustomEmbeddedVersionKindField) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.ObjectKind) +} + +func (obj *ExternalInternalSame) GetObjectKind() schema.ObjectKind { + return &obj.MyWeirdCustomEmbeddedVersionKindField +} + +func (obj *TestType1) GetObjectKind() schema.ObjectKind { + return &obj.MyWeirdCustomEmbeddedVersionKindField +} + +func (obj *ExternalTestType1) GetObjectKind() schema.ObjectKind { + return &obj.MyWeirdCustomEmbeddedVersionKindField +} + +func (obj *TestType2) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } +func (obj *ExternalTestType2) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing/zz_generated.deepcopy.go new file mode 100644 index 00000000000..ca6fa3393a9 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing/zz_generated.deepcopy.go @@ -0,0 +1,135 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package testing + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + reflect "reflect" +) + +// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. +func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { + return []conversion.GeneratedDeepCopyFunc{ + {Fn: DeepCopy_testing_ExternalInternalSame, InType: reflect.TypeOf(&ExternalInternalSame{})}, + {Fn: DeepCopy_testing_ExternalTestType1, InType: reflect.TypeOf(&ExternalTestType1{})}, + {Fn: DeepCopy_testing_ExternalTestType2, InType: reflect.TypeOf(&ExternalTestType2{})}, + {Fn: DeepCopy_testing_TestType1, InType: reflect.TypeOf(&TestType1{})}, + {Fn: DeepCopy_testing_TestType2, InType: reflect.TypeOf(&TestType2{})}, + } +} + +// DeepCopy_testing_ExternalInternalSame is an autogenerated deepcopy function. +func DeepCopy_testing_ExternalInternalSame(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExternalInternalSame) + out := out.(*ExternalInternalSame) + *out = *in + return nil + } +} + +// DeepCopy_testing_ExternalTestType1 is an autogenerated deepcopy function. 
+func DeepCopy_testing_ExternalTestType1(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExternalTestType1) + out := out.(*ExternalTestType1) + *out = *in + if in.M != nil { + in, out := &in.M, &out.M + *out = make(map[string]int) + for key, val := range *in { + (*out)[key] = val + } + } + if in.N != nil { + in, out := &in.N, &out.N + *out = make(map[string]ExternalTestType2) + for key, val := range *in { + (*out)[key] = val + } + } + if in.O != nil { + in, out := &in.O, &out.O + *out = new(ExternalTestType2) + **out = **in + } + if in.P != nil { + in, out := &in.P, &out.P + *out = make([]ExternalTestType2, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_testing_ExternalTestType2 is an autogenerated deepcopy function. +func DeepCopy_testing_ExternalTestType2(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExternalTestType2) + out := out.(*ExternalTestType2) + *out = *in + return nil + } +} + +// DeepCopy_testing_TestType1 is an autogenerated deepcopy function. +func DeepCopy_testing_TestType1(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TestType1) + out := out.(*TestType1) + *out = *in + if in.M != nil { + in, out := &in.M, &out.M + *out = make(map[string]int) + for key, val := range *in { + (*out)[key] = val + } + } + if in.N != nil { + in, out := &in.N, &out.N + *out = make(map[string]TestType2) + for key, val := range *in { + (*out)[key] = val + } + } + if in.O != nil { + in, out := &in.O, &out.O + *out = new(TestType2) + **out = **in + } + if in.P != nil { + in, out := &in.P, &out.P + *out = make([]TestType2, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_testing_TestType2 is an autogenerated deepcopy function. 
+func DeepCopy_testing_TestType2(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TestType2) + out := out.(*TestType2) + *out = *in + return nil + } +} diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/testing/BUILD b/staging/src/k8s.io/apimachinery/pkg/runtime/testing/BUILD new file mode 100644 index 00000000000..3186202d6ab --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/testing/BUILD @@ -0,0 +1,23 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "types.go", + "zz_generated.deepcopy.go", + ], + tags = ["automanaged"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/testing/doc.go b/staging/src/k8s.io/apimachinery/pkg/runtime/testing/doc.go new file mode 100644 index 00000000000..a4903278543 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/testing/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package + +package testing diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/testing/types.go b/staging/src/k8s.io/apimachinery/pkg/runtime/testing/types.go new file mode 100644 index 00000000000..aeea7084bea --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/testing/types.go @@ -0,0 +1,194 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type EmbeddedTest struct { + runtime.TypeMeta + ID string + Object runtime.Object + EmptyObject runtime.Object +} + +type EmbeddedTestExternal struct { + runtime.TypeMeta `json:",inline"` + ID string `json:"id,omitempty"` + Object runtime.RawExtension `json:"object,omitempty"` + EmptyObject runtime.RawExtension `json:"emptyObject,omitempty"` +} + +type ObjectTest struct { + runtime.TypeMeta + + ID string + Items []runtime.Object +} + +type ObjectTestExternal struct { + runtime.TypeMeta `yaml:",inline" json:",inline"` + + ID string `json:"id,omitempty"` + Items []runtime.RawExtension `json:"items,omitempty"` +} + +type InternalSimple struct { + runtime.TypeMeta `json:",inline"` + TestString string `json:"testString"` +} + +type ExternalSimple struct { + runtime.TypeMeta `json:",inline"` + TestString string `json:"testString"` +} + +type ExtensionA struct { + runtime.TypeMeta `json:",inline"` + TestString string `json:"testString"` +} + +type 
ExtensionB struct { + runtime.TypeMeta `json:",inline"` + TestString string `json:"testString"` +} + +type ExternalExtensionType struct { + runtime.TypeMeta `json:",inline"` + Extension runtime.RawExtension `json:"extension"` +} + +type InternalExtensionType struct { + runtime.TypeMeta `json:",inline"` + Extension runtime.Object `json:"extension"` +} + +type ExternalOptionalExtensionType struct { + runtime.TypeMeta `json:",inline"` + Extension runtime.RawExtension `json:"extension,omitempty"` +} + +type InternalOptionalExtensionType struct { + runtime.TypeMeta `json:",inline"` + Extension runtime.Object `json:"extension,omitempty"` +} + +type InternalComplex struct { + runtime.TypeMeta + String string + Integer int + Integer64 int64 + Int64 int64 + Bool bool +} + +type ExternalComplex struct { + runtime.TypeMeta `json:",inline"` + String string `json:"string" description:"testing"` + Integer int `json:"int"` + Integer64 int64 `json:",omitempty"` + Int64 int64 + Bool bool `json:"bool"` +} + +// Test a weird version/kind embedding format. 
+// +k8s:deepcopy-gen=false +type MyWeirdCustomEmbeddedVersionKindField struct { + ID string `json:"ID,omitempty"` + APIVersion string `json:"myVersionKey,omitempty"` + ObjectKind string `json:"myKindKey,omitempty"` + Z string `json:"Z,omitempty"` + Y uint64 `json:"Y,omitempty"` +} + +type TestType1 struct { + MyWeirdCustomEmbeddedVersionKindField `json:",inline"` + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` + C int8 `json:"C,omitempty"` + D int16 `json:"D,omitempty"` + E int32 `json:"E,omitempty"` + F int64 `json:"F,omitempty"` + G uint `json:"G,omitempty"` + H uint8 `json:"H,omitempty"` + I uint16 `json:"I,omitempty"` + J uint32 `json:"J,omitempty"` + K uint64 `json:"K,omitempty"` + L bool `json:"L,omitempty"` + M map[string]int `json:"M,omitempty"` + N map[string]TestType2 `json:"N,omitempty"` + O *TestType2 `json:"O,omitempty"` + P []TestType2 `json:"Q,omitempty"` +} + +type TestType2 struct { + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` +} + +type ExternalTestType2 struct { + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` +} + +type ExternalTestType1 struct { + MyWeirdCustomEmbeddedVersionKindField `json:",inline"` + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` + C int8 `json:"C,omitempty"` + D int16 `json:"D,omitempty"` + E int32 `json:"E,omitempty"` + F int64 `json:"F,omitempty"` + G uint `json:"G,omitempty"` + H uint8 `json:"H,omitempty"` + I uint16 `json:"I,omitempty"` + J uint32 `json:"J,omitempty"` + K uint64 `json:"K,omitempty"` + L bool `json:"L,omitempty"` + M map[string]int `json:"M,omitempty"` + N map[string]ExternalTestType2 `json:"N,omitempty"` + O *ExternalTestType2 `json:"O,omitempty"` + P []ExternalTestType2 `json:"Q,omitempty"` +} + +type ExternalInternalSame struct { + MyWeirdCustomEmbeddedVersionKindField `json:",inline"` + A TestType2 `json:"A,omitempty"` +} + +type UnversionedType struct { + MyWeirdCustomEmbeddedVersionKindField `json:",inline"` + A string `json:"A,omitempty"` +} + 
+type UnknownType struct { + MyWeirdCustomEmbeddedVersionKindField `json:",inline"` + A string `json:"A,omitempty"` +} + +func (obj *MyWeirdCustomEmbeddedVersionKindField) GetObjectKind() schema.ObjectKind { return obj } +func (obj *MyWeirdCustomEmbeddedVersionKindField) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.ObjectKind = gvk.ToAPIVersionAndKind() +} +func (obj *MyWeirdCustomEmbeddedVersionKindField) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.ObjectKind) +} + +func (obj *TestType2) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } +func (obj *ExternalTestType2) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/testing/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/runtime/testing/zz_generated.deepcopy.go new file mode 100644 index 00000000000..c95392f9150 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/testing/zz_generated.deepcopy.go @@ -0,0 +1,386 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package testing + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. +func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { + return []conversion.GeneratedDeepCopyFunc{ + {Fn: DeepCopy_testing_EmbeddedTest, InType: reflect.TypeOf(&EmbeddedTest{})}, + {Fn: DeepCopy_testing_EmbeddedTestExternal, InType: reflect.TypeOf(&EmbeddedTestExternal{})}, + {Fn: DeepCopy_testing_ExtensionA, InType: reflect.TypeOf(&ExtensionA{})}, + {Fn: DeepCopy_testing_ExtensionB, InType: reflect.TypeOf(&ExtensionB{})}, + {Fn: DeepCopy_testing_ExternalComplex, InType: reflect.TypeOf(&ExternalComplex{})}, + {Fn: DeepCopy_testing_ExternalExtensionType, InType: reflect.TypeOf(&ExternalExtensionType{})}, + {Fn: DeepCopy_testing_ExternalInternalSame, InType: reflect.TypeOf(&ExternalInternalSame{})}, + {Fn: DeepCopy_testing_ExternalOptionalExtensionType, InType: reflect.TypeOf(&ExternalOptionalExtensionType{})}, + {Fn: DeepCopy_testing_ExternalSimple, InType: reflect.TypeOf(&ExternalSimple{})}, + {Fn: DeepCopy_testing_ExternalTestType1, InType: reflect.TypeOf(&ExternalTestType1{})}, + {Fn: DeepCopy_testing_ExternalTestType2, InType: reflect.TypeOf(&ExternalTestType2{})}, + {Fn: DeepCopy_testing_InternalComplex, InType: reflect.TypeOf(&InternalComplex{})}, + {Fn: DeepCopy_testing_InternalExtensionType, InType: reflect.TypeOf(&InternalExtensionType{})}, + {Fn: DeepCopy_testing_InternalOptionalExtensionType, InType: reflect.TypeOf(&InternalOptionalExtensionType{})}, + {Fn: DeepCopy_testing_InternalSimple, InType: reflect.TypeOf(&InternalSimple{})}, + {Fn: DeepCopy_testing_ObjectTest, InType: reflect.TypeOf(&ObjectTest{})}, + {Fn: DeepCopy_testing_ObjectTestExternal, InType: reflect.TypeOf(&ObjectTestExternal{})}, + {Fn: DeepCopy_testing_TestType1, InType: reflect.TypeOf(&TestType1{})}, + {Fn: 
DeepCopy_testing_TestType2, InType: reflect.TypeOf(&TestType2{})}, + {Fn: DeepCopy_testing_UnknownType, InType: reflect.TypeOf(&UnknownType{})}, + {Fn: DeepCopy_testing_UnversionedType, InType: reflect.TypeOf(&UnversionedType{})}, + } +} + +// DeepCopy_testing_EmbeddedTest is an autogenerated deepcopy function. +func DeepCopy_testing_EmbeddedTest(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EmbeddedTest) + out := out.(*EmbeddedTest) + *out = *in + // in.Object is kind 'Interface' + if in.Object != nil { + if newVal, err := c.DeepCopy(&in.Object); err != nil { + return err + } else { + out.Object = *newVal.(*runtime.Object) + } + } + // in.EmptyObject is kind 'Interface' + if in.EmptyObject != nil { + if newVal, err := c.DeepCopy(&in.EmptyObject); err != nil { + return err + } else { + out.EmptyObject = *newVal.(*runtime.Object) + } + } + return nil + } +} + +// DeepCopy_testing_EmbeddedTestExternal is an autogenerated deepcopy function. +func DeepCopy_testing_EmbeddedTestExternal(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EmbeddedTestExternal) + out := out.(*EmbeddedTestExternal) + *out = *in + if newVal, err := c.DeepCopy(&in.Object); err != nil { + return err + } else { + out.Object = *newVal.(*runtime.RawExtension) + } + if newVal, err := c.DeepCopy(&in.EmptyObject); err != nil { + return err + } else { + out.EmptyObject = *newVal.(*runtime.RawExtension) + } + return nil + } +} + +// DeepCopy_testing_ExtensionA is an autogenerated deepcopy function. +func DeepCopy_testing_ExtensionA(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExtensionA) + out := out.(*ExtensionA) + *out = *in + return nil + } +} + +// DeepCopy_testing_ExtensionB is an autogenerated deepcopy function. 
+func DeepCopy_testing_ExtensionB(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExtensionB) + out := out.(*ExtensionB) + *out = *in + return nil + } +} + +// DeepCopy_testing_ExternalComplex is an autogenerated deepcopy function. +func DeepCopy_testing_ExternalComplex(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExternalComplex) + out := out.(*ExternalComplex) + *out = *in + return nil + } +} + +// DeepCopy_testing_ExternalExtensionType is an autogenerated deepcopy function. +func DeepCopy_testing_ExternalExtensionType(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExternalExtensionType) + out := out.(*ExternalExtensionType) + *out = *in + if newVal, err := c.DeepCopy(&in.Extension); err != nil { + return err + } else { + out.Extension = *newVal.(*runtime.RawExtension) + } + return nil + } +} + +// DeepCopy_testing_ExternalInternalSame is an autogenerated deepcopy function. +func DeepCopy_testing_ExternalInternalSame(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExternalInternalSame) + out := out.(*ExternalInternalSame) + *out = *in + return nil + } +} + +// DeepCopy_testing_ExternalOptionalExtensionType is an autogenerated deepcopy function. +func DeepCopy_testing_ExternalOptionalExtensionType(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExternalOptionalExtensionType) + out := out.(*ExternalOptionalExtensionType) + *out = *in + if newVal, err := c.DeepCopy(&in.Extension); err != nil { + return err + } else { + out.Extension = *newVal.(*runtime.RawExtension) + } + return nil + } +} + +// DeepCopy_testing_ExternalSimple is an autogenerated deepcopy function. 
+func DeepCopy_testing_ExternalSimple(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExternalSimple) + out := out.(*ExternalSimple) + *out = *in + return nil + } +} + +// DeepCopy_testing_ExternalTestType1 is an autogenerated deepcopy function. +func DeepCopy_testing_ExternalTestType1(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExternalTestType1) + out := out.(*ExternalTestType1) + *out = *in + if in.M != nil { + in, out := &in.M, &out.M + *out = make(map[string]int) + for key, val := range *in { + (*out)[key] = val + } + } + if in.N != nil { + in, out := &in.N, &out.N + *out = make(map[string]ExternalTestType2) + for key, val := range *in { + (*out)[key] = val + } + } + if in.O != nil { + in, out := &in.O, &out.O + *out = new(ExternalTestType2) + **out = **in + } + if in.P != nil { + in, out := &in.P, &out.P + *out = make([]ExternalTestType2, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_testing_ExternalTestType2 is an autogenerated deepcopy function. +func DeepCopy_testing_ExternalTestType2(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExternalTestType2) + out := out.(*ExternalTestType2) + *out = *in + return nil + } +} + +// DeepCopy_testing_InternalComplex is an autogenerated deepcopy function. +func DeepCopy_testing_InternalComplex(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*InternalComplex) + out := out.(*InternalComplex) + *out = *in + return nil + } +} + +// DeepCopy_testing_InternalExtensionType is an autogenerated deepcopy function. 
+func DeepCopy_testing_InternalExtensionType(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*InternalExtensionType) + out := out.(*InternalExtensionType) + *out = *in + // in.Extension is kind 'Interface' + if in.Extension != nil { + if newVal, err := c.DeepCopy(&in.Extension); err != nil { + return err + } else { + out.Extension = *newVal.(*runtime.Object) + } + } + return nil + } +} + +// DeepCopy_testing_InternalOptionalExtensionType is an autogenerated deepcopy function. +func DeepCopy_testing_InternalOptionalExtensionType(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*InternalOptionalExtensionType) + out := out.(*InternalOptionalExtensionType) + *out = *in + // in.Extension is kind 'Interface' + if in.Extension != nil { + if newVal, err := c.DeepCopy(&in.Extension); err != nil { + return err + } else { + out.Extension = *newVal.(*runtime.Object) + } + } + return nil + } +} + +// DeepCopy_testing_InternalSimple is an autogenerated deepcopy function. +func DeepCopy_testing_InternalSimple(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*InternalSimple) + out := out.(*InternalSimple) + *out = *in + return nil + } +} + +// DeepCopy_testing_ObjectTest is an autogenerated deepcopy function. +func DeepCopy_testing_ObjectTest(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectTest) + out := out.(*ObjectTest) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*runtime.Object) + } + } + } + return nil + } +} + +// DeepCopy_testing_ObjectTestExternal is an autogenerated deepcopy function. 
+func DeepCopy_testing_ObjectTestExternal(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectTestExternal) + out := out.(*ObjectTestExternal) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*runtime.RawExtension) + } + } + } + return nil + } +} + +// DeepCopy_testing_TestType1 is an autogenerated deepcopy function. +func DeepCopy_testing_TestType1(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TestType1) + out := out.(*TestType1) + *out = *in + if in.M != nil { + in, out := &in.M, &out.M + *out = make(map[string]int) + for key, val := range *in { + (*out)[key] = val + } + } + if in.N != nil { + in, out := &in.N, &out.N + *out = make(map[string]TestType2) + for key, val := range *in { + (*out)[key] = val + } + } + if in.O != nil { + in, out := &in.O, &out.O + *out = new(TestType2) + **out = **in + } + if in.P != nil { + in, out := &in.P, &out.P + *out = make([]TestType2, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_testing_TestType2 is an autogenerated deepcopy function. +func DeepCopy_testing_TestType2(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TestType2) + out := out.(*TestType2) + *out = *in + return nil + } +} + +// DeepCopy_testing_UnknownType is an autogenerated deepcopy function. +func DeepCopy_testing_UnknownType(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*UnknownType) + out := out.(*UnknownType) + *out = *in + return nil + } +} + +// DeepCopy_testing_UnversionedType is an autogenerated deepcopy function. 
+func DeepCopy_testing_UnversionedType(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*UnversionedType) + out := out.(*UnversionedType) + *out = *in + return nil + } +} diff --git a/staging/src/k8s.io/apimachinery/pkg/test/BUILD b/staging/src/k8s.io/apimachinery/pkg/test/BUILD new file mode 100644 index 00000000000..d06e754d2c7 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/test/BUILD @@ -0,0 +1,52 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "api_meta_help_test.go", + "api_meta_meta_test.go", + "apis_meta_v1_unstructed_unstructure_test.go", + "runtime_helper_test.go", + "runtime_serializer_protobuf_protobuf_test.go", + "runtime_unversioned_test.go", + ], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//vendor/github.com/google/gofuzz:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/testing:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/testapigroup:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/testapigroup/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = ["util.go"], + tags = ["automanaged"], + deps = [ + 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/testapigroup:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/testapigroup/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + ], +) diff --git a/pkg/apimachinery/tests/api_meta_help_test.go b/staging/src/k8s.io/apimachinery/pkg/test/api_meta_help_test.go similarity index 76% rename from pkg/apimachinery/tests/api_meta_help_test.go rename to staging/src/k8s.io/apimachinery/pkg/test/api_meta_help_test.go index bbcf2471a3b..42b6013ae3c 100644 --- a/pkg/apimachinery/tests/api_meta_help_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/test/api_meta_help_test.go @@ -14,22 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -package tests +package test import ( "reflect" "testing" - "k8s.io/api/core/v1" + fuzz "github.com/google/gofuzz" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/apis/testapigroup" + "k8s.io/apimachinery/pkg/apis/testapigroup/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/diff" - "k8s.io/kubernetes/pkg/api" - - "github.com/google/gofuzz" ) func TestIsList(t *testing.T) { @@ -37,8 +37,8 @@ func TestIsList(t *testing.T) { obj runtime.Object isList bool }{ - {&api.PodList{}, true}, - {&api.Pod{}, false}, + {&testapigroup.CarpList{}, true}, + {&testapigroup.Carp{}, false}, } for _, item := range tests { if e, a := item.isList, meta.IsListType(item.obj); e != a { @@ -49,31 +49,24 @@ func TestIsList(t *testing.T) { func TestExtractList(t *testing.T) { list1 := []runtime.Object{ - &api.Pod{ObjectMeta: 
metav1.ObjectMeta{Name: "1"}}, - &api.Service{ObjectMeta: metav1.ObjectMeta{Name: "2"}}, + &testapigroup.Carp{ObjectMeta: metav1.ObjectMeta{Name: "1"}}, + &testapigroup.Carp{ObjectMeta: metav1.ObjectMeta{Name: "2"}}, } - list2 := &v1.List{ + list2 := &ListV1{ Items: []runtime.RawExtension{ {Raw: []byte("foo")}, {Raw: []byte("bar")}, - {Object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "other"}}}, + {Object: &v1.Carp{ObjectMeta: metav1.ObjectMeta{Name: "other"}}}, }, } list3 := &fakePtrValueList{ - Items: []*api.Pod{ + Items: []*testapigroup.Carp{ {ObjectMeta: metav1.ObjectMeta{Name: "1"}}, {ObjectMeta: metav1.ObjectMeta{Name: "2"}}, }, } - list4 := &api.PodList{ - Items: []api.Pod{ - {ObjectMeta: metav1.ObjectMeta{Name: "1"}}, - {ObjectMeta: metav1.ObjectMeta{Name: "2"}}, - {ObjectMeta: metav1.ObjectMeta{Name: "3"}}, - }, - } - list5 := &v1.PodList{ - Items: []v1.Pod{ + list4 := &testapigroup.CarpList{ + Items: []testapigroup.Carp{ {ObjectMeta: metav1.ObjectMeta{Name: "1"}}, {ObjectMeta: metav1.ObjectMeta{Name: "2"}}, {ObjectMeta: metav1.ObjectMeta{Name: "3"}}, @@ -86,19 +79,15 @@ func TestExtractList(t *testing.T) { equal bool }{ { - in: &api.List{}, + in: &List{}, out: []interface{}{}, }, { - in: &v1.List{}, + in: &ListV1{}, out: []interface{}{}, }, { - in: &v1.PodList{}, - out: []interface{}{}, - }, - { - in: &api.List{Items: list1}, + in: &List{Items: list1}, out: []interface{}{list1[0], list1[1]}, }, { @@ -114,10 +103,6 @@ func TestExtractList(t *testing.T) { in: list4, out: []interface{}{&list4.Items[0], &list4.Items[1], &list4.Items[2]}, }, - { - in: list5, - out: []interface{}{&list5.Items[0], &list5.Items[1], &list5.Items[2]}, - }, } for i, test := range testCases { list, err := meta.ExtractList(test.in) @@ -142,31 +127,24 @@ func TestExtractList(t *testing.T) { func TestEachListItem(t *testing.T) { list1 := []runtime.Object{ - &api.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1"}}, - &api.Service{ObjectMeta: metav1.ObjectMeta{Name: "2"}}, + 
&testapigroup.Carp{ObjectMeta: metav1.ObjectMeta{Name: "1"}}, + &testapigroup.Carp{ObjectMeta: metav1.ObjectMeta{Name: "2"}}, } - list2 := &v1.List{ + list2 := &ListV1{ Items: []runtime.RawExtension{ {Raw: []byte("foo")}, {Raw: []byte("bar")}, - {Object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "other"}}}, + {Object: &v1.Carp{ObjectMeta: metav1.ObjectMeta{Name: "other"}}}, }, } list3 := &fakePtrValueList{ - Items: []*api.Pod{ + Items: []*testapigroup.Carp{ {ObjectMeta: metav1.ObjectMeta{Name: "1"}}, {ObjectMeta: metav1.ObjectMeta{Name: "2"}}, }, } - list4 := &api.PodList{ - Items: []api.Pod{ - {ObjectMeta: metav1.ObjectMeta{Name: "1"}}, - {ObjectMeta: metav1.ObjectMeta{Name: "2"}}, - {ObjectMeta: metav1.ObjectMeta{Name: "3"}}, - }, - } - list5 := &v1.PodList{ - Items: []v1.Pod{ + list4 := &testapigroup.CarpList{ + Items: []testapigroup.Carp{ {ObjectMeta: metav1.ObjectMeta{Name: "1"}}, {ObjectMeta: metav1.ObjectMeta{Name: "2"}}, {ObjectMeta: metav1.ObjectMeta{Name: "3"}}, @@ -178,19 +156,15 @@ func TestEachListItem(t *testing.T) { out []interface{} }{ { - in: &api.List{}, + in: &List{}, out: []interface{}{}, }, { - in: &v1.List{}, + in: &ListV1{}, out: []interface{}{}, }, { - in: &v1.PodList{}, - out: []interface{}{}, - }, - { - in: &api.List{Items: list1}, + in: &List{Items: list1}, out: []interface{}{list1[0], list1[1]}, }, { @@ -205,10 +179,6 @@ func TestEachListItem(t *testing.T) { in: list4, out: []interface{}{&list4.Items[0], &list4.Items[1], &list4.Items[2]}, }, - { - in: list5, - out: []interface{}{&list5.Items[0], &list5.Items[1], &list5.Items[2]}, - }, } for i, test := range testCases { list := []runtime.Object{} @@ -252,7 +222,7 @@ func TestExtractListOfInterfacePtrs(t *testing.T) { } type fakePtrValueList struct { - Items []*api.Pod + Items []*testapigroup.Carp } func (obj fakePtrValueList) GetObjectKind() schema.ObjectKind { @@ -260,11 +230,11 @@ func (obj fakePtrValueList) GetObjectKind() schema.ObjectKind { } func TestSetList(t *testing.T) { - pl 
:= &api.PodList{} + pl := &testapigroup.CarpList{} list := []runtime.Object{ - &api.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1"}}, - &api.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}}, - &api.Pod{ObjectMeta: metav1.ObjectMeta{Name: "3"}}, + &testapigroup.Carp{ObjectMeta: metav1.ObjectMeta{Name: "1"}}, + &testapigroup.Carp{ObjectMeta: metav1.ObjectMeta{Name: "2"}}, + &testapigroup.Carp{ObjectMeta: metav1.ObjectMeta{Name: "3"}}, } err := meta.SetList(pl, list) if err != nil { @@ -274,18 +244,18 @@ func TestSetList(t *testing.T) { t.Fatalf("Expected %v, got %v", e, a) } for i := range list { - if e, a := list[i].(*api.Pod).Name, pl.Items[i].Name; e != a { + if e, a := list[i].(*testapigroup.Carp).Name, pl.Items[i].Name; e != a { t.Fatalf("Expected %v, got %v", e, a) } } } func TestSetListToRuntimeObjectArray(t *testing.T) { - pl := &api.List{} + pl := &List{} list := []runtime.Object{ - &api.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1"}}, - &api.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}}, - &api.Pod{ObjectMeta: metav1.ObjectMeta{Name: "3"}}, + &testapigroup.Carp{ObjectMeta: metav1.ObjectMeta{Name: "1"}}, + &testapigroup.Carp{ObjectMeta: metav1.ObjectMeta{Name: "2"}}, + &testapigroup.Carp{ObjectMeta: metav1.ObjectMeta{Name: "3"}}, } err := meta.SetList(pl, list) if err != nil { @@ -325,7 +295,7 @@ func TestSetListToMatchingType(t *testing.T) { func TestSetExtractListRoundTrip(t *testing.T) { fuzzer := fuzz.New().NilChance(0).NumElements(1, 5) for i := 0; i < 5; i++ { - start := &api.PodList{} + start := &testapigroup.CarpList{} fuzzer.Fuzz(&start.Items) list, err := meta.ExtractList(start) @@ -333,7 +303,7 @@ func TestSetExtractListRoundTrip(t *testing.T) { t.Errorf("Unexpected error %v", err) continue } - got := &api.PodList{} + got := &testapigroup.CarpList{} err = meta.SetList(got, list) if err != nil { t.Errorf("Unexpected error %v", err) diff --git a/pkg/apimachinery/tests/api_meta_meta_test.go b/staging/src/k8s.io/apimachinery/pkg/test/api_meta_meta_test.go 
similarity index 99% rename from pkg/apimachinery/tests/api_meta_meta_test.go rename to staging/src/k8s.io/apimachinery/pkg/test/api_meta_meta_test.go index fd20c842a7c..18a99e46f20 100644 --- a/pkg/apimachinery/tests/api_meta_meta_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/test/api_meta_meta_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package tests +package test import ( "reflect" @@ -24,14 +24,14 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/testapigroup" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/api" ) func TestAPIObjectMeta(t *testing.T) { - j := &api.Pod{ + j := &testapigroup.Carp{ TypeMeta: metav1.TypeMeta{APIVersion: "/a", Kind: "b"}, ObjectMeta: metav1.ObjectMeta{ Namespace: "bar", @@ -394,7 +394,7 @@ func TestAccessOwnerReferences(t *testing.T) { // BenchmarkAccessorSetFastPath shows the interface fast path func BenchmarkAccessorSetFastPath(b *testing.B) { - obj := &api.Pod{ + obj := &testapigroup.Carp{ TypeMeta: metav1.TypeMeta{APIVersion: "/a", Kind: "b"}, ObjectMeta: metav1.ObjectMeta{ Namespace: "bar", diff --git a/pkg/apimachinery/tests/apis_meta_v1_unstructed_unstructure_test.go b/staging/src/k8s.io/apimachinery/pkg/test/apis_meta_v1_unstructed_unstructure_test.go similarity index 72% rename from pkg/apimachinery/tests/apis_meta_v1_unstructed_unstructure_test.go rename to staging/src/k8s.io/apimachinery/pkg/test/apis_meta_v1_unstructed_unstructure_test.go index 617e6a0f8ce..d6436214c7a 100644 --- a/pkg/apimachinery/tests/apis_meta_v1_unstructed_unstructure_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/test/apis_meta_v1_unstructed_unstructure_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package tests +package test import ( "fmt" @@ -24,21 +24,21 @@ import ( "testing" "time" + apitesting "k8s.io/apimachinery/pkg/api/testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/apis/testapigroup" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/pkg/api/validation" ) func TestDecodeUnstructured(t *testing.T) { - groupVersionString := api.Registry.GroupOrDie(api.GroupName).GroupVersion.String() + groupVersionString := "v1" rawJson := fmt.Sprintf(`{"kind":"Pod","apiVersion":"%s","metadata":{"name":"test"}}`, groupVersionString) - pl := &api.List{ + pl := &List{ Items: []runtime.Object{ - &api.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1"}}, + &testapigroup.Carp{ObjectMeta: metav1.ObjectMeta{Name: "1"}}, &runtime.Unknown{ TypeMeta: runtime.TypeMeta{Kind: "Pod", APIVersion: groupVersionString}, Raw: []byte(rawJson), @@ -87,13 +87,13 @@ func TestDecode(t *testing.T) { }, }, { - json: []byte(`{"items": [{"metadata": {"name": "object1"}, "apiVersion": "test", "kind": "test_kind"}, {"metadata": {"name": "object2"}, "apiVersion": "test", "kind": "test_kind"}], "apiVersion": "test", "kind": "test_list"}`), + json: []byte(`{"items": [{"metadata": {"name": "object1", "deletionGracePeriodSeconds": 10}, "apiVersion": "test", "kind": "test_kind"}, {"metadata": {"name": "object2"}, "apiVersion": "test", "kind": "test_kind"}], "apiVersion": "test", "kind": "test_list"}`), want: &unstructured.UnstructuredList{ Object: map[string]interface{}{"apiVersion": "test", "kind": "test_list"}, Items: []unstructured.Unstructured{ { Object: map[string]interface{}{ - "metadata": map[string]interface{}{"name": "object1"}, + "metadata": map[string]interface{}{"name": "object1", "deletionGracePeriodSeconds": int64(10)}, "apiVersion": "test", "kind": "test_kind", }, 
@@ -125,19 +125,22 @@ func TestDecode(t *testing.T) { func TestUnstructuredGetters(t *testing.T) { trueVar := true + ten := int64(10) unstruct := unstructured.Unstructured{ Object: map[string]interface{}{ "kind": "test_kind", "apiVersion": "test_version", "metadata": map[string]interface{}{ - "name": "test_name", - "namespace": "test_namespace", - "generateName": "test_generateName", - "uid": "test_uid", - "resourceVersion": "test_resourceVersion", - "selfLink": "test_selfLink", - "creationTimestamp": "2009-11-10T23:00:00Z", - "deletionTimestamp": "2010-11-10T23:00:00Z", + "name": "test_name", + "namespace": "test_namespace", + "generateName": "test_generateName", + "uid": "test_uid", + "resourceVersion": "test_resourceVersion", + "generation": ten, + "deletionGracePeriodSeconds": ten, + "selfLink": "test_selfLink", + "creationTimestamp": "2009-11-10T23:00:00Z", + "deletionTimestamp": "2010-11-10T23:00:00Z", "labels": map[string]interface{}{ "test_label": "test_value", }, @@ -244,25 +247,34 @@ func TestUnstructuredGetters(t *testing.T) { if got, want := unstruct.GetClusterName(), "cluster123"; got != want { t.Errorf("GetClusterName()=%v, want %v", got, want) } + if got, want := unstruct.GetDeletionGracePeriodSeconds(), &ten; !reflect.DeepEqual(got, want) { + t.Errorf("GetDeletionGracePeriodSeconds()=%v, want %v", got, want) + } + if got, want := unstruct.GetGeneration(), ten; !reflect.DeepEqual(got, want) { + t.Errorf("GetGeneration()=%v, want %v", got, want) + } } func TestUnstructuredSetters(t *testing.T) { unstruct := unstructured.Unstructured{} trueVar := true + ten := int64(10) want := unstructured.Unstructured{ Object: map[string]interface{}{ "kind": "test_kind", "apiVersion": "test_version", "metadata": map[string]interface{}{ - "name": "test_name", - "namespace": "test_namespace", - "generateName": "test_generateName", - "uid": "test_uid", - "resourceVersion": "test_resourceVersion", - "selfLink": "test_selfLink", - "creationTimestamp": 
"2009-11-10T23:00:00Z", - "deletionTimestamp": "2010-11-10T23:00:00Z", + "name": "test_name", + "namespace": "test_namespace", + "generateName": "test_generateName", + "uid": "test_uid", + "resourceVersion": "test_resourceVersion", + "selfLink": "test_selfLink", + "creationTimestamp": "2009-11-10T23:00:00Z", + "deletionTimestamp": "2010-11-10T23:00:00Z", + "deletionGracePeriodSeconds": &ten, + "generation": ten, "labels": map[string]interface{}{ "test_label": "test_value", }, @@ -326,6 +338,8 @@ func TestUnstructuredSetters(t *testing.T) { unstruct.SetOwnerReferences(newOwnerReferences) unstruct.SetFinalizers([]string{"finalizer.1", "finalizer.2"}) unstruct.SetClusterName("cluster123") + unstruct.SetDeletionGracePeriodSeconds(&ten) + unstruct.SetGeneration(ten) if !reflect.DeepEqual(unstruct, want) { t.Errorf("Wanted: \n%s\n Got:\n%s", want, unstruct) @@ -435,7 +449,7 @@ func TestDecodeNumbers(t *testing.T) { // Start with a valid pod originalJSON := []byte(`{ - "kind":"Pod", + "kind":"Carp", "apiVersion":"v1", "metadata":{"name":"pod","namespace":"foo"}, "spec":{ @@ -444,21 +458,15 @@ func TestDecodeNumbers(t *testing.T) { } }`) - pod := &api.Pod{} + pod := &testapigroup.Carp{} - // Decode with structured codec - codec, err := testapi.GetCodecForObject(pod) + _, codecs := TestScheme() + codec := apitesting.TestCodec(codecs, schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal}) + + err := runtime.DecodeInto(codec, originalJSON, pod) if err != nil { t.Fatalf("unexpected error: %v", err) } - err = runtime.DecodeInto(codec, originalJSON, pod) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - // ensure pod is valid - if errs := validation.ValidatePod(pod); len(errs) > 0 { - t.Fatalf("pod should be valid: %v", errs) - } // Round-trip with unstructured codec unstructuredObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, originalJSON) @@ -481,15 +489,82 @@ func TestDecodeNumbers(t *testing.T) { t.Fatalf("unexpected error: %v", 
err) } // ensure pod is still valid - pod2, ok := obj2.(*api.Pod) + pod2, ok := obj2.(*testapigroup.Carp) if !ok { t.Fatalf("expected an *api.Pod, got %#v", obj2) } - if errs := validation.ValidatePod(pod2); len(errs) > 0 { - t.Fatalf("pod should be valid: %v", errs) - } + // ensure round-trip preserved large integers if !reflect.DeepEqual(pod, pod2) { t.Fatalf("Expected\n\t%#v, got \n\t%#v", pod, pod2) } } + +// TestAccessorMethods does opaque roundtrip testing against an Unstructured +// instance's Object methods to ensure that what is "Set" matches what you +// subsequently "Get" without any assertions against internal state. +func TestAccessorMethods(t *testing.T) { + int64p := func(i int) *int64 { + v := int64(i) + return &v + } + tests := []struct { + accessor string + val interface{} + nilVal reflect.Value + }{ + {accessor: "Namespace", val: "foo"}, + {accessor: "Name", val: "bar"}, + {accessor: "GenerateName", val: "baz"}, + {accessor: "UID", val: types.UID("uid")}, + {accessor: "ResourceVersion", val: "1"}, + {accessor: "Generation", val: int64(5)}, + {accessor: "SelfLink", val: "/foo"}, + // TODO: Handle timestamps, which are being marshalled as UTC and + // unmarshalled as Local. 
+ // https://github.com/kubernetes/kubernetes/issues/21402 + // {accessor: "CreationTimestamp", val: someTime}, + // {accessor: "DeletionTimestamp", val: someTimeP}, + {accessor: "DeletionTimestamp", nilVal: reflect.ValueOf((*metav1.Time)(nil))}, + {accessor: "DeletionGracePeriodSeconds", val: int64p(10)}, + {accessor: "DeletionGracePeriodSeconds", val: int64p(0)}, + {accessor: "DeletionGracePeriodSeconds", nilVal: reflect.ValueOf((*int64)(nil))}, + {accessor: "Labels", val: map[string]string{"foo": "bar"}}, + {accessor: "Annotations", val: map[string]string{"foo": "bar"}}, + {accessor: "Initializers", val: &metav1.Initializers{Pending: []metav1.Initializer{{Name: "foo"}}}}, + {accessor: "Initializers", val: &metav1.Initializers{}}, + {accessor: "Initializers", nilVal: reflect.ValueOf((*metav1.Initializers)(nil))}, + {accessor: "Finalizers", val: []string{"foo"}}, + {accessor: "OwnerReferences", val: []metav1.OwnerReference{{Name: "foo"}}}, + {accessor: "ClusterName", val: "foo"}, + } + for i, test := range tests { + t.Logf("evaluating test %d (%s)", i, test.accessor) + + u := &unstructured.Unstructured{} + setter := reflect.ValueOf(u).MethodByName("Set" + test.accessor) + getter := reflect.ValueOf(u).MethodByName("Get" + test.accessor) + + args := []reflect.Value{} + if test.val != nil { + args = append(args, reflect.ValueOf(test.val)) + } else { + args = append(args, test.nilVal) + } + setter.Call(args) + + ret := getter.Call([]reflect.Value{}) + actual := ret[0].Interface() + + var expected interface{} + if test.val != nil { + expected = test.val + } else { + expected = test.nilVal.Interface() + } + + if e, a := expected, actual; !reflect.DeepEqual(e, a) { + t.Fatalf("%s: expected %v (%T), got %v (%T)", test.accessor, e, e, a, a) + } + } +} diff --git a/pkg/apimachinery/tests/runtime_helper_test.go b/staging/src/k8s.io/apimachinery/pkg/test/runtime_helper_test.go similarity index 60% rename from pkg/apimachinery/tests/runtime_helper_test.go rename to 
staging/src/k8s.io/apimachinery/pkg/test/runtime_helper_test.go index e83d116791d..0106fb20bf3 100644 --- a/pkg/apimachinery/tests/runtime_helper_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/test/runtime_helper_test.go @@ -14,32 +14,36 @@ See the License for the specific language governing permissions and limitations under the License. */ -package tests +package test import ( "testing" + apitesting "k8s.io/apimachinery/pkg/api/testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/testapigroup" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/testapi" ) func TestDecodeList(t *testing.T) { - pl := &api.List{ + pl := List{ Items: []runtime.Object{ - &api.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1"}}, + &testapigroup.Carp{ObjectMeta: metav1.ObjectMeta{Name: "1"}}, &runtime.Unknown{ - TypeMeta: runtime.TypeMeta{Kind: "Pod", APIVersion: api.Registry.GroupOrDie(api.GroupName).GroupVersion.String()}, - Raw: []byte(`{"kind":"Pod","apiVersion":"` + api.Registry.GroupOrDie(api.GroupName).GroupVersion.String() + `","metadata":{"name":"test"}}`), + TypeMeta: runtime.TypeMeta{Kind: "Carp", APIVersion: "v1"}, + Raw: []byte(`{"kind":"Carp","apiVersion":"` + "v1" + `","metadata":{"name":"test"}}`), ContentType: runtime.ContentTypeJSON, }, }, } - if errs := runtime.DecodeList(pl.Items, testapi.Default.Codec()); len(errs) != 0 { + + _, codecs := TestScheme() + Codec := apitesting.TestCodec(codecs, testapigroup.SchemeGroupVersion) + + if errs := runtime.DecodeList(pl.Items, Codec); len(errs) != 0 { t.Fatalf("unexpected error %v", errs) } - if pod, ok := pl.Items[1].(*api.Pod); !ok || pod.Name != "test" { + if pod, ok := pl.Items[1].(*testapigroup.Carp); !ok || pod.Name != "test" { t.Errorf("object not converted: %#v", pl.Items[1]) } } diff --git a/pkg/apimachinery/tests/runtime_serializer_protobuf_protobuf_test.go b/staging/src/k8s.io/apimachinery/pkg/test/runtime_serializer_protobuf_protobuf_test.go 
similarity index 94% rename from pkg/apimachinery/tests/runtime_serializer_protobuf_protobuf_test.go rename to staging/src/k8s.io/apimachinery/pkg/test/runtime_serializer_protobuf_protobuf_test.go index 5d82fe376d1..818d243a720 100644 --- a/pkg/apimachinery/tests/runtime_serializer_protobuf_protobuf_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/test/runtime_serializer_protobuf_protobuf_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package tests +package test import ( "bytes" @@ -24,15 +24,13 @@ import ( "strings" "testing" - "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/testapigroup/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" "k8s.io/apimachinery/pkg/util/diff" - "k8s.io/kubernetes/pkg/api" - _ "k8s.io/kubernetes/pkg/api/install" ) type testObject struct { @@ -274,16 +272,12 @@ func TestProtobufDecode(t *testing.T) { } func TestDecodeObjects(t *testing.T) { - obj1 := &v1.Pod{ + obj1 := &v1.Carp{ ObjectMeta: metav1.ObjectMeta{ Name: "cool", }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "test", - }, - }, + Spec: v1.CarpSpec{ + Hostname: "coolhost", }, } obj1wire, err := obj1.Marshal() @@ -292,7 +286,7 @@ func TestDecodeObjects(t *testing.T) { } wire1, err := (&runtime.Unknown{ - TypeMeta: runtime.TypeMeta{Kind: "Pod", APIVersion: "v1"}, + TypeMeta: runtime.TypeMeta{Kind: "Carp", APIVersion: "v1"}, Raw: obj1wire, }).Marshal() if err != nil { @@ -300,7 +294,7 @@ func TestDecodeObjects(t *testing.T) { } unk2 := &runtime.Unknown{ - TypeMeta: runtime.TypeMeta{Kind: "Pod", APIVersion: "v1"}, + TypeMeta: runtime.TypeMeta{Kind: "Carp", APIVersion: "v1"}, } wire2 := make([]byte, len(wire1)*2) n, err := unk2.NestedMarshalTo(wire2, obj1, uint64(obj1.Size())) @@ -323,9 +317,11 @@ func 
TestDecodeObjects(t *testing.T) { data: wire1, }, } - + scheme := runtime.NewScheme() for i, test := range testCases { - s := protobuf.NewSerializer(api.Scheme, api.Scheme, "application/protobuf") + scheme.AddKnownTypes(schema.GroupVersion{Version: "v1"}, &v1.Carp{}) + v1.AddToScheme(scheme) + s := protobuf.NewSerializer(scheme, scheme, "application/protobuf") obj, err := runtime.Decode(s, test.data) switch { diff --git a/pkg/apimachinery/tests/runtime_unversioned_test.go b/staging/src/k8s.io/apimachinery/pkg/test/runtime_unversioned_test.go similarity index 84% rename from pkg/apimachinery/tests/runtime_unversioned_test.go rename to staging/src/k8s.io/apimachinery/pkg/test/runtime_unversioned_test.go index 6ff785831c0..4bf833a93e8 100644 --- a/pkg/apimachinery/tests/runtime_unversioned_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/test/runtime_unversioned_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package tests +package test import ( "encoding/json" @@ -23,11 +23,10 @@ import ( // TODO: Ideally we should create the necessary package structure in e.g., // pkg/conversion/test/... instead of importing pkg/api here. 
+ apitesting "k8s.io/apimachinery/pkg/api/testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/apimachinery/pkg/runtime/schema" ) func TestV1EncodeDecodeStatus(t *testing.T) { @@ -38,9 +37,10 @@ func TestV1EncodeDecodeStatus(t *testing.T) { Message: "", } - v1Codec := testapi.Default.Codec() + _, codecs := TestScheme() + codec := apitesting.TestCodec(codecs, schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal}) - encoded, err := runtime.Encode(v1Codec, status) + encoded, err := runtime.Encode(codec, status) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -54,7 +54,7 @@ func TestV1EncodeDecodeStatus(t *testing.T) { if typeMeta.APIVersion != "v1" { t.Errorf("APIVersion is not set to \"v1\". Got %v", string(encoded)) } - decoded, err := runtime.Decode(v1Codec, encoded) + decoded, err := runtime.Decode(codec, encoded) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -72,7 +72,9 @@ func TestExperimentalEncodeDecodeStatus(t *testing.T) { } // TODO: caesarxuchao: use the testapi.Extensions.Codec() once the PR that // moves experimental from v1 to v1beta1 got merged. - expCodec := api.Codecs.LegacyCodec(extensions.SchemeGroupVersion) + _, codecs := TestScheme() + expCodec := apitesting.TestCodec(codecs, schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal}) + encoded, err := runtime.Encode(expCodec, status) if err != nil { t.Errorf("unexpected error: %v", err) diff --git a/staging/src/k8s.io/apimachinery/pkg/test/util.go b/staging/src/k8s.io/apimachinery/pkg/test/util.go new file mode 100644 index 00000000000..721eff4ad16 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/test/util.go @@ -0,0 +1,64 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/testapigroup" + "k8s.io/apimachinery/pkg/apis/testapigroup/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + apiserializer "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// List and ListV1 should be kept in sync with k8s.io/kubernetes/pkg/api#List +// and k8s.io/api/core/v1#List. +type List struct { + metav1.TypeMeta + metav1.ListMeta + + Items []runtime.Object +} + +type ListV1 struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []runtime.RawExtension `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +func TestScheme() (*runtime.Scheme, apiserializer.CodecFactory) { + internalGV := schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal} + externalGV := schema.GroupVersion{Group: "", Version: "v1"} + scheme := runtime.NewScheme() + + scheme.AddKnownTypes(internalGV, + &testapigroup.Carp{}, + &testapigroup.CarpList{}, + &List{}, + ) + scheme.AddKnownTypes(externalGV, + &v1.Carp{}, + &v1.CarpList{}, + &List{}, + ) + testapigroup.AddToScheme(scheme) + v1.AddToScheme(scheme) + + codecs := apiserializer.NewCodecFactory(scheme) + return scheme, codecs +} diff --git a/staging/src/k8s.io/apimachinery/pkg/util/errors/errors.go b/staging/src/k8s.io/apimachinery/pkg/util/errors/errors.go index 
bdea0e16c72..26e7eb2082f 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/errors/errors.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -21,7 +21,7 @@ import ( "fmt" ) -// MessagesgCountMap contains occurance for each error message. +// MessageCountMap contains occurance for each error message. type MessageCountMap map[string]int // Aggregate represents an object that contains multiple errors, but does not diff --git a/staging/src/k8s.io/apimachinery/pkg/util/errors/errors_test.go b/staging/src/k8s.io/apimachinery/pkg/util/errors/errors_test.go index 3335326cab1..0ad3967d285 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/errors/errors_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/errors/errors_test.go @@ -266,7 +266,7 @@ func TestFlatten(t *testing.T) { func TestCreateAggregateFromMessageCountMap(t *testing.T) { testCases := []struct { name string - mcp MessageCountMap + mcm MessageCountMap expected Aggregate }{ { @@ -279,6 +279,11 @@ func TestCreateAggregateFromMessageCountMap(t *testing.T) { MessageCountMap{"abc": 2, "ghi": 1}, aggregate{fmt.Errorf("abc (repeated 2 times)"), fmt.Errorf("ghi")}, }, + { + "input has multiple messages", + MessageCountMap{"ghi": 1, "abc": 2}, + aggregate{fmt.Errorf("abc (repeated 2 times)"), fmt.Errorf("ghi")}, + }, } var expected, agg []error @@ -288,8 +293,8 @@ func TestCreateAggregateFromMessageCountMap(t *testing.T) { expected = testCase.expected.Errors() sort.Slice(expected, func(i, j int) bool { return expected[i].Error() < expected[j].Error() }) } - if testCase.mcp != nil { - agg = CreateAggregateFromMessageCountMap(testCase.mcp).Errors() + if testCase.mcm != nil { + agg = CreateAggregateFromMessageCountMap(testCase.mcm).Errors() sort.Slice(agg, func(i, j int) bool { return agg[i].Error() < agg[j].Error() }) } if !reflect.DeepEqual(expected, agg) { diff --git a/staging/src/k8s.io/apimachinery/pkg/util/net/interface.go b/staging/src/k8s.io/apimachinery/pkg/util/net/interface.go index 
a1e53d2e436..e8d090e4513 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/net/interface.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/net/interface.go @@ -29,6 +29,13 @@ import ( "github.com/golang/glog" ) +type AddressFamily uint + +const ( + familyIPv4 AddressFamily = 4 + familyIPv6 AddressFamily = 6 +) + type Route struct { Interface string Destination net.IP @@ -36,6 +43,7 @@ type Route struct { // TODO: add more fields here if needed } +// getRoutes obtains the IPv4 routes, and filters out non-default routes. func getRoutes(input io.Reader) ([]Route, error) { routes := []Route{} if input == nil { @@ -52,24 +60,30 @@ func getRoutes(input io.Reader) ([]Route, error) { continue } fields := strings.Fields(line) - routes = append(routes, Route{}) - route := &routes[len(routes)-1] - route.Interface = fields[0] - ip, err := parseIP(fields[1]) + dest, err := parseHexToIPv4(fields[1]) if err != nil { return nil, err } - route.Destination = ip - ip, err = parseIP(fields[2]) + gw, err := parseHexToIPv4(fields[2]) if err != nil { return nil, err } - route.Gateway = ip + if !dest.Equal(net.IPv4zero) { + continue + } + routes = append(routes, Route{ + Interface: fields[0], + Destination: dest, + Gateway: gw, + }) } return routes, nil } -func parseIP(str string) (net.IP, error) { +// parseHexToIPv4 takes the hex IP address string from route file and converts it +// from little endian to big endian for creation of a net.IP address. +// a net.IP, using big endian ordering. 
+func parseHexToIPv4(str string) (net.IP, error) { if str == "" { return nil, fmt.Errorf("input is nil") } @@ -77,12 +91,10 @@ func parseIP(str string) (net.IP, error) { if err != nil { return nil, err } - //TODO add ipv6 support if len(bytes) != net.IPv4len { - return nil, fmt.Errorf("only IPv4 is supported") + return nil, fmt.Errorf("invalid IPv4 address in route") } - bytes[0], bytes[1], bytes[2], bytes[3] = bytes[3], bytes[2], bytes[1], bytes[0] - return net.IP(bytes), nil + return net.IP([]byte{bytes[3], bytes[2], bytes[1], bytes[0]}), nil } func isInterfaceUp(intf *net.Interface) bool { @@ -96,10 +108,22 @@ func isInterfaceUp(intf *net.Interface) bool { return false } -//getFinalIP method receives all the IP addrs of a Interface -//and returns a nil if the address is Loopback, Ipv6, link-local or nil. -//It returns a valid IPv4 if an Ipv4 address is found in the array. -func getFinalIP(addrs []net.Addr) (net.IP, error) { +func isLoopbackOrPointToPoint(intf *net.Interface) bool { + return intf.Flags&(net.FlagLoopback|net.FlagPointToPoint) != 0 +} + +func inFamily(ip net.IP, expectedFamily AddressFamily) bool { + ipFamily := familyIPv4 + if ip.To4() == nil { + ipFamily = familyIPv6 + } + return ipFamily == expectedFamily +} + +// getMatchingGlobalIP method checks all the IP addresses of a Interface looking +// for a valid non-loopback/link-local address of the requested family and returns +// it, if found. 
+func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) { if len(addrs) > 0 { for i := range addrs { glog.V(4).Infof("Checking addr %s.", addrs[i].String()) @@ -107,17 +131,15 @@ func getFinalIP(addrs []net.Addr) (net.IP, error) { if err != nil { return nil, err } - //Only IPv4 - //TODO : add IPv6 support - if ip.To4() != nil { - if !ip.IsLoopback() && !ip.IsLinkLocalMulticast() && !ip.IsLinkLocalUnicast() { + if inFamily(ip, family) { + if ip.IsGlobalUnicast() { glog.V(4).Infof("IP found %v", ip) return ip, nil } else { - glog.V(4).Infof("Loopback/link-local found %v", ip) + glog.V(4).Infof("non-global IP found %v", ip) } } else { - glog.V(4).Infof("%v is not a valid IPv4 address", ip) + glog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) } } @@ -125,7 +147,7 @@ func getFinalIP(addrs []net.Addr) (net.IP, error) { return nil, nil } -func getIPFromInterface(intfName string, nw networkInterfacer) (net.IP, error) { +func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInterfacer) (net.IP, error) { intf, err := nw.InterfaceByName(intfName) if err != nil { return nil, err @@ -136,63 +158,77 @@ func getIPFromInterface(intfName string, nw networkInterfacer) (net.IP, error) { return nil, err } glog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) - finalIP, err := getFinalIP(addrs) + matchingIP, err := getMatchingGlobalIP(addrs, forFamily) if err != nil { return nil, err } - if finalIP != nil { - glog.V(4).Infof("valid IPv4 address for interface %q found as %v.", intfName, finalIP) - return finalIP, nil + if matchingIP != nil { + glog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) + return matchingIP, nil } } - return nil, nil } -func flagsSet(flags net.Flags, test net.Flags) bool { - return flags&test != 0 +// memberOF tells if the IP is of the desired family. Used for checking interface addresses. 
+func memberOf(ip net.IP, family AddressFamily) bool { + if ip.To4() != nil { + return family == familyIPv4 + } else { + return family == familyIPv6 + } } -func flagsClear(flags net.Flags, test net.Flags) bool { - return flags&test == 0 -} - -func chooseHostInterfaceNativeGo() (net.IP, error) { - intfs, err := net.Interfaces() +// chooseIPFromHostInterfaces looks at all system interfaces, trying to find one that is up that +// has a global unicast address (non-loopback, non-link local, non-point2point), and returns the IP. +// Searches for IPv4 addresses, and then IPv6 addresses. +func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { + intfs, err := nw.Interfaces() if err != nil { return nil, err } - i := 0 - var ip net.IP - for i = range intfs { - if flagsSet(intfs[i].Flags, net.FlagUp) && flagsClear(intfs[i].Flags, net.FlagLoopback|net.FlagPointToPoint) { - addrs, err := intfs[i].Addrs() + if len(intfs) == 0 { + return nil, fmt.Errorf("no interfaces found on host.") + } + for _, family := range []AddressFamily{familyIPv4, familyIPv6} { + glog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) + for _, intf := range intfs { + if !isInterfaceUp(&intf) { + glog.V(4).Infof("Skipping: down interface %q", intf.Name) + continue + } + if isLoopbackOrPointToPoint(&intf) { + glog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) + continue + } + addrs, err := nw.Addrs(&intf) if err != nil { return nil, err } - if len(addrs) > 0 { - for _, addr := range addrs { - if addrIP, _, err := net.ParseCIDR(addr.String()); err == nil { - if addrIP.To4() != nil { - ip = addrIP.To4() - if !ip.IsLinkLocalMulticast() && !ip.IsLinkLocalUnicast() { - break - } - } - } + if len(addrs) == 0 { + glog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) + continue + } + for _, addr := range addrs { + ip, _, err := net.ParseCIDR(addr.String()) + if err != nil { + return nil, fmt.Errorf("Unable to parse CIDR for interface %q: 
%s", intf.Name, err) } - if ip != nil { - // This interface should suffice. - break + if !memberOf(ip, family) { + glog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) + continue } + // TODO: Decide if should open up to allow IPv6 LLAs in future. + if !ip.IsGlobalUnicast() { + glog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) + continue + } + glog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) + return ip, nil } } } - if ip == nil { - return nil, fmt.Errorf("no acceptable interface from host") - } - glog.V(4).Infof("Choosing interface %s (IP %v) as default", intfs[i].Name, ip) - return ip, nil + return nil, fmt.Errorf("no acceptable interface with global unicast address found on host") } //ChooseHostInterface is a method used fetch an IP for a daemon. @@ -200,39 +236,41 @@ func chooseHostInterfaceNativeGo() (net.IP, error) { //For a node with no internet connection ,it returns error //For a multi n/w interface node it returns the IP of the interface with gateway on it. func ChooseHostInterface() (net.IP, error) { + var nw networkInterfacer = networkInterface{} inFile, err := os.Open("/proc/net/route") if err != nil { if os.IsNotExist(err) { - return chooseHostInterfaceNativeGo() + return chooseIPFromHostInterfaces(nw) } return nil, err } defer inFile.Close() - var nw networkInterfacer = networkInterface{} return chooseHostInterfaceFromRoute(inFile, nw) } +// networkInterfacer defines an interface for several net library functions. Production +// code will forward to net library functions, and unit tests will override the methods +// for testing purposes. 
type networkInterfacer interface { InterfaceByName(intfName string) (*net.Interface, error) Addrs(intf *net.Interface) ([]net.Addr, error) + Interfaces() ([]net.Interface, error) } +// networkInterface implements the networkInterfacer interface for production code, just +// wrapping the underlying net library function calls. type networkInterface struct{} func (_ networkInterface) InterfaceByName(intfName string) (*net.Interface, error) { - intf, err := net.InterfaceByName(intfName) - if err != nil { - return nil, err - } - return intf, nil + return net.InterfaceByName(intfName) } func (_ networkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { - addrs, err := intf.Addrs() - if err != nil { - return nil, err - } - return addrs, nil + return intf.Addrs() +} + +func (_ networkInterface) Interfaces() ([]net.Interface, error) { + return net.Interfaces() } func chooseHostInterfaceFromRoute(inFile io.Reader, nw networkInterfacer) (net.IP, error) { @@ -240,27 +278,30 @@ func chooseHostInterfaceFromRoute(inFile io.Reader, nw networkInterfacer) (net.I if err != nil { return nil, err } - zero := net.IP{0, 0, 0, 0} - var finalIP net.IP - for i := range routes { - //find interface with gateway - if routes[i].Destination.Equal(zero) { - glog.V(4).Infof("Default route transits interface %q", routes[i].Interface) - finalIP, err := getIPFromInterface(routes[i].Interface, nw) + if len(routes) == 0 { + return nil, fmt.Errorf("No default routes.") + } + // TODO: append IPv6 routes for processing - currently only have IPv4 routes + for _, family := range []AddressFamily{familyIPv4, familyIPv6} { + glog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) + for _, route := range routes { + // TODO: When have IPv6 routes, filter here to speed up processing + // if route.Family != family { + // continue + // } + glog.V(4).Infof("Default route transits interface %q", route.Interface) + finalIP, err := getIPFromInterface(route.Interface, family, nw) if err 
!= nil { return nil, err } if finalIP != nil { - glog.V(4).Infof("Choosing IP %v ", finalIP) + glog.V(4).Infof("Found active IP %v ", finalIP) return finalIP, nil } } } - glog.V(4).Infof("No valid IP found") - if finalIP == nil { - return nil, fmt.Errorf("Unable to select an IP.") - } - return nil, nil + glog.V(4).Infof("No active IP found by looking at default routes") + return nil, fmt.Errorf("unable to select an IP from default routes.") } // If bind-address is usable, return it directly diff --git a/staging/src/k8s.io/apimachinery/pkg/util/net/interface_test.go b/staging/src/k8s.io/apimachinery/pkg/util/net/interface_test.go index fc66904c57e..373af00b5c6 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/net/interface_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/net/interface_test.go @@ -48,19 +48,19 @@ virbr0 007AA8C0 00000000 0001 0 0 0 00FFFFFF 0 0 0 ` const nothing = `Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT ` -const gatewayfirstIpv6_1 = `Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT +const badDestination = `Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT eth3 00000000 0100FE0A 0003 0 0 1024 00000000 0 0 0 eth3 0000FE0AA1 00000000 0001 0 0 0 0080FFFF 0 0 0 docker0 000011AC 00000000 0001 0 0 0 0000FFFF 0 0 0 virbr0 007AA8C0 00000000 0001 0 0 0 00FFFFFF 0 0 0 ` -const gatewayfirstIpv6_2 = `Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT +const badGateway = `Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT eth3 00000000 0100FE0AA1 0003 0 0 1024 00000000 0 0 0 eth3 0000FE0A 00000000 0001 0 0 0 0080FFFF 0 0 0 docker0 000011AC 00000000 0001 0 0 0 0000FFFF 0 0 0 virbr0 007AA8C0 00000000 0001 0 0 0 00FFFFFF 0 0 0 ` -const route_Invalidhex = `Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT +const route_Invalidhex = `Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT eth3 00000000 0100FE0AA 
0003 0 0 1024 00000000 0 0 0 eth3 0000FE0A 00000000 0001 0 0 0 0080FFFF 0 0 0 docker0 000011AC 00000000 0001 0 0 0 0000FFFF 0 0 0 @@ -73,25 +73,66 @@ eth0 00000000 0120372D 0001 0 0 0 00000000 eth0 00000000 00000000 0001 0 0 2048 00000000 0 0 0 ` +const ( + flagUp = net.FlagUp | net.FlagBroadcast | net.FlagMulticast + flagDown = net.FlagBroadcast | net.FlagMulticast + flagLoopback = net.FlagUp | net.FlagLoopback + flagP2P = net.FlagUp | net.FlagPointToPoint +) + +func makeIntf(index int, name string, flags net.Flags) net.Interface { + mac := net.HardwareAddr{0, 0x32, 0x7d, 0x69, 0xf7, byte(0x30 + index)} + return net.Interface{ + Index: index, + MTU: 1500, + Name: name, + HardwareAddr: mac, + Flags: flags} +} + +var ( + downIntf = makeIntf(1, "eth3", flagDown) + loopbackIntf = makeIntf(1, "lo", flagLoopback) + p2pIntf = makeIntf(1, "lo", flagP2P) + upIntf = makeIntf(1, "eth3", flagUp) +) + +var ( + ipv4Route = Route{Interface: "eth3", Gateway: net.ParseIP("10.254.0.1")} +) + func TestGetRoutes(t *testing.T) { testCases := []struct { - tcase string - route string - expected int + tcase string + route string + count int + expected *Route + errStrFrag string }{ - {"gatewayfirst", gatewayfirst, 4}, - {"gatewaymiddle", gatewaymiddle, 4}, - {"gatewaylast", gatewaylast, 4}, - {"nothing", nothing, 0}, - {"gatewayfirstIpv6_1", gatewayfirstIpv6_1, 0}, - {"gatewayfirstIpv6_2", gatewayfirstIpv6_2, 0}, - {"route_Invalidhex", route_Invalidhex, 0}, + {"gatewayfirst", gatewayfirst, 1, &ipv4Route, ""}, + {"gatewaymiddle", gatewaymiddle, 1, &ipv4Route, ""}, + {"gatewaylast", gatewaylast, 1, &ipv4Route, ""}, + {"no routes", nothing, 0, nil, ""}, + {"badDestination", badDestination, 0, nil, "invalid IPv4"}, + {"badGateway", badGateway, 0, nil, "invalid IPv4"}, + {"route_Invalidhex", route_Invalidhex, 0, nil, "odd length hex string"}, + {"no default routes", noInternetConnection, 0, nil, ""}, } for _, tc := range testCases { r := strings.NewReader(tc.route) routes, err := getRoutes(r) 
- if len(routes) != tc.expected { - t.Errorf("case[%v]: expected %v, got %v .err : %v", tc.tcase, tc.expected, len(routes), err) + if err != nil { + if !strings.Contains(err.Error(), tc.errStrFrag) { + t.Errorf("case[%s]: Error string %q does not contain %q", tc.tcase, err, tc.errStrFrag) + } + } else if tc.errStrFrag != "" { + t.Errorf("case[%s]: Error %q expected, but not seen", tc.tcase, tc.errStrFrag) + } else { + if tc.count != len(routes) { + t.Errorf("case[%s]: expected %d routes, have %v", tc.tcase, tc.count, routes) + } else if tc.count == 1 && !tc.expected.Gateway.Equal(routes[0].Gateway) { + t.Errorf("case[%s]: expected %v, got %v .err : %v", tc.tcase, tc.expected, routes, err) + } } } } @@ -112,7 +153,7 @@ func TestParseIP(t *testing.T) { {"valid", "12345678", true, net.IP{120, 86, 52, 18}}, } for _, tc := range testCases { - ip, err := parseIP(tc.ip) + ip, err := parseHexToIPv4(tc.ip) if !ip.Equal(tc.expected) { t.Errorf("case[%v]: expected %q, got %q . err : %v", tc.tcase, tc.expected, ip, err) } @@ -122,15 +163,15 @@ func TestParseIP(t *testing.T) { func TestIsInterfaceUp(t *testing.T) { testCases := []struct { tcase string - intf net.Interface + intf *net.Interface expected bool }{ - {"up", net.Interface{Index: 0, MTU: 0, Name: "eth3", HardwareAddr: nil, Flags: net.FlagUp}, true}, - {"down", net.Interface{Index: 0, MTU: 0, Name: "eth3", HardwareAddr: nil, Flags: 0}, false}, - {"nothing", net.Interface{}, false}, + {"up", &net.Interface{Index: 0, MTU: 0, Name: "eth3", HardwareAddr: nil, Flags: net.FlagUp}, true}, + {"down", &net.Interface{Index: 0, MTU: 0, Name: "eth3", HardwareAddr: nil, Flags: 0}, false}, + {"no interface", nil, false}, } for _, tc := range testCases { - it := isInterfaceUp(&tc.intf) + it := isInterfaceUp(tc.intf) if it != tc.expected { t.Errorf("case[%v]: expected %v, got %v .", tc.tcase, tc.expected, it) } @@ -150,17 +191,24 @@ func TestFinalIP(t *testing.T) { testCases := []struct { tcase string addr []net.Addr + family 
AddressFamily expected net.IP }{ - {"ipv6", []net.Addr{addrStruct{val: "fe80::2f7:6fff:fe6e:2956/64"}}, nil}, - {"invalidCIDR", []net.Addr{addrStruct{val: "fe80::2f7:67fff:fe6e:2956/64"}}, nil}, - {"loopback", []net.Addr{addrStruct{val: "127.0.0.1/24"}}, nil}, - {"ip4", []net.Addr{addrStruct{val: "10.254.12.132/17"}}, net.ParseIP("10.254.12.132")}, + {"no ipv4", []net.Addr{addrStruct{val: "2001::5/64"}}, familyIPv4, nil}, + {"no ipv6", []net.Addr{addrStruct{val: "10.128.0.4/32"}}, familyIPv6, nil}, + {"invalidV4CIDR", []net.Addr{addrStruct{val: "10.20.30.40.50/24"}}, familyIPv4, nil}, + {"invalidV6CIDR", []net.Addr{addrStruct{val: "fe80::2f7:67fff:fe6e:2956/64"}}, familyIPv6, nil}, + {"loopback", []net.Addr{addrStruct{val: "127.0.0.1/24"}}, familyIPv4, nil}, + {"loopbackv6", []net.Addr{addrStruct{val: "::1/128"}}, familyIPv6, nil}, + {"link local v4", []net.Addr{addrStruct{val: "169.254.1.10/16"}}, familyIPv4, nil}, + {"link local v6", []net.Addr{addrStruct{val: "fe80::2f7:6fff:fe6e:2956/64"}}, familyIPv6, nil}, + {"ip4", []net.Addr{addrStruct{val: "10.254.12.132/17"}}, familyIPv4, net.ParseIP("10.254.12.132")}, + {"ip6", []net.Addr{addrStruct{val: "2001::5/64"}}, familyIPv6, net.ParseIP("2001::5")}, - {"nothing", []net.Addr{}, nil}, + {"no addresses", []net.Addr{}, familyIPv4, nil}, } for _, tc := range testCases { - ip, err := getFinalIP(tc.addr) + ip, err := getMatchingGlobalIP(tc.addr, tc.family) if !ip.Equal(tc.expected) { t.Errorf("case[%v]: expected %v, got %v .err : %v", tc.tcase, tc.expected, ip, err) } @@ -179,12 +227,12 @@ func TestAddrs(t *testing.T) { } } +// Has a valid IPv4 address (IPv6 is LLA) type validNetworkInterface struct { } func (_ validNetworkInterface) InterfaceByName(intfName string) (*net.Interface, error) { - c := net.Interface{Index: 0, MTU: 0, Name: "eth3", HardwareAddr: nil, Flags: net.FlagUp} - return &c, nil + return &upIntf, nil } func (_ validNetworkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { var ifat []net.Addr 
@@ -192,81 +240,194 @@ func (_ validNetworkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { addrStruct{val: "fe80::2f7:6fff:fe6e:2956/64"}, addrStruct{val: "10.254.71.145/17"}} return ifat, nil } - -type validNetworkInterfaceWithLinkLocal struct { +func (_ validNetworkInterface) Interfaces() ([]net.Interface, error) { + return []net.Interface{upIntf}, nil } -func (_ validNetworkInterfaceWithLinkLocal) InterfaceByName(intfName string) (*net.Interface, error) { - c := net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: net.FlagUp} - return &c, nil +// Interface with only IPv6 address +type ipv6NetworkInterface struct { } -func (_ validNetworkInterfaceWithLinkLocal) Addrs(intf *net.Interface) ([]net.Addr, error) { + +func (_ ipv6NetworkInterface) InterfaceByName(intfName string) (*net.Interface, error) { + return &upIntf, nil +} +func (_ ipv6NetworkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { var ifat []net.Addr - ifat = []net.Addr{addrStruct{val: "169.254.162.166/16"}, addrStruct{val: "45.55.47.146/19"}} + ifat = []net.Addr{addrStruct{val: "2001::200/64"}} return ifat, nil } -type validNetworkInterfacewithIpv6Only struct { +func (_ ipv6NetworkInterface) Interfaces() ([]net.Interface, error) { + return []net.Interface{upIntf}, nil } -func (_ validNetworkInterfacewithIpv6Only) InterfaceByName(intfName string) (*net.Interface, error) { - c := net.Interface{Index: 0, MTU: 0, Name: "eth3", HardwareAddr: nil, Flags: net.FlagUp} - return &c, nil +// Only with link local addresses +type networkInterfaceWithOnlyLinkLocals struct { } -func (_ validNetworkInterfacewithIpv6Only) Addrs(intf *net.Interface) ([]net.Addr, error) { + +func (_ networkInterfaceWithOnlyLinkLocals) InterfaceByName(intfName string) (*net.Interface, error) { + return &upIntf, nil +} +func (_ networkInterfaceWithOnlyLinkLocals) Addrs(intf *net.Interface) ([]net.Addr, error) { var ifat []net.Addr - ifat = []net.Addr{addrStruct{val: "fe80::2f7:6fff:fe6e:2956/64"}} 
+ ifat = []net.Addr{addrStruct{val: "169.254.162.166/16"}, addrStruct{val: "fe80::200/10"}} return ifat, nil } +func (_ networkInterfaceWithOnlyLinkLocals) Interfaces() ([]net.Interface, error) { + return []net.Interface{upIntf}, nil +} +// Unable to get interface(s) +type failGettingNetworkInterface struct { +} + +func (_ failGettingNetworkInterface) InterfaceByName(intfName string) (*net.Interface, error) { + return nil, fmt.Errorf("unable get Interface") +} +func (_ failGettingNetworkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { + return nil, nil +} +func (_ failGettingNetworkInterface) Interfaces() ([]net.Interface, error) { + return nil, fmt.Errorf("mock failed getting all interfaces") +} + +// No interfaces type noNetworkInterface struct { } func (_ noNetworkInterface) InterfaceByName(intfName string) (*net.Interface, error) { - return nil, fmt.Errorf("unable get Interface") + return nil, fmt.Errorf("no such network interface") } func (_ noNetworkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { return nil, nil } - -type networkInterfacewithNoAddrs struct { +func (_ noNetworkInterface) Interfaces() ([]net.Interface, error) { + return []net.Interface{}, nil } -func (_ networkInterfacewithNoAddrs) InterfaceByName(intfName string) (*net.Interface, error) { - c := net.Interface{Index: 0, MTU: 0, Name: "eth3", HardwareAddr: nil, Flags: net.FlagUp} - return &c, nil -} -func (_ networkInterfacewithNoAddrs) Addrs(intf *net.Interface) ([]net.Addr, error) { - return nil, fmt.Errorf("unable get Addrs") +// Interface is down +type downNetworkInterface struct { } -type networkInterfacewithIpv6addrs struct { +func (_ downNetworkInterface) InterfaceByName(intfName string) (*net.Interface, error) { + return &downIntf, nil } - -func (_ networkInterfacewithIpv6addrs) InterfaceByName(intfName string) (*net.Interface, error) { - c := net.Interface{Index: 0, MTU: 0, Name: "eth3", HardwareAddr: nil, Flags: net.FlagUp} - return &c, nil -} -func (_ 
networkInterfacewithIpv6addrs) Addrs(intf *net.Interface) ([]net.Addr, error) { +func (_ downNetworkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { var ifat []net.Addr - ifat = []net.Addr{addrStruct{val: "fe80::2f7:6ffff:fe6e:2956/64"}} + ifat = []net.Addr{ + addrStruct{val: "fe80::2f7:6fff:fe6e:2956/64"}, addrStruct{val: "10.254.71.145/17"}} return ifat, nil } +func (_ downNetworkInterface) Interfaces() ([]net.Interface, error) { + return []net.Interface{downIntf}, nil +} + +// Loopback interface +type loopbackNetworkInterface struct { +} + +func (_ loopbackNetworkInterface) InterfaceByName(intfName string) (*net.Interface, error) { + return &loopbackIntf, nil +} +func (_ loopbackNetworkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { + var ifat []net.Addr + ifat = []net.Addr{ + addrStruct{val: "::1/128"}, addrStruct{val: "127.0.0.1/8"}} + return ifat, nil +} +func (_ loopbackNetworkInterface) Interfaces() ([]net.Interface, error) { + return []net.Interface{loopbackIntf}, nil +} + +// Point to point interface +type p2pNetworkInterface struct { +} + +func (_ p2pNetworkInterface) InterfaceByName(intfName string) (*net.Interface, error) { + return &p2pIntf, nil +} +func (_ p2pNetworkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { + var ifat []net.Addr + ifat = []net.Addr{ + addrStruct{val: "::1/128"}, addrStruct{val: "127.0.0.1/8"}} + return ifat, nil +} +func (_ p2pNetworkInterface) Interfaces() ([]net.Interface, error) { + return []net.Interface{p2pIntf}, nil +} + +// Unable to get IP addresses for interface +type networkInterfaceFailGetAddrs struct { +} + +func (_ networkInterfaceFailGetAddrs) InterfaceByName(intfName string) (*net.Interface, error) { + return &upIntf, nil +} +func (_ networkInterfaceFailGetAddrs) Addrs(intf *net.Interface) ([]net.Addr, error) { + return nil, fmt.Errorf("unable to get Addrs") +} +func (_ networkInterfaceFailGetAddrs) Interfaces() ([]net.Interface, error) { + return []net.Interface{upIntf}, nil +} 
+ +// No addresses for interface +type networkInterfaceWithNoAddrs struct { +} + +func (_ networkInterfaceWithNoAddrs) InterfaceByName(intfName string) (*net.Interface, error) { + return &upIntf, nil +} +func (_ networkInterfaceWithNoAddrs) Addrs(intf *net.Interface) ([]net.Addr, error) { + ifat := []net.Addr{} + return ifat, nil +} +func (_ networkInterfaceWithNoAddrs) Interfaces() ([]net.Interface, error) { + return []net.Interface{upIntf}, nil +} + +// Invalid addresses for interface +type networkInterfaceWithInvalidAddr struct { +} + +func (_ networkInterfaceWithInvalidAddr) InterfaceByName(intfName string) (*net.Interface, error) { + return &upIntf, nil +} +func (_ networkInterfaceWithInvalidAddr) Addrs(intf *net.Interface) ([]net.Addr, error) { + var ifat []net.Addr + ifat = []net.Addr{addrStruct{val: "10.20.30.40.50/24"}} + return ifat, nil +} +func (_ networkInterfaceWithInvalidAddr) Interfaces() ([]net.Interface, error) { + return []net.Interface{upIntf}, nil +} func TestGetIPFromInterface(t *testing.T) { testCases := []struct { - tcase string - nwname string - nw networkInterfacer - expected net.IP + tcase string + nwname string + family AddressFamily + nw networkInterfacer + expected net.IP + errStrFrag string }{ - {"valid", "eth3", validNetworkInterface{}, net.ParseIP("10.254.71.145")}, - {"ipv6", "eth3", validNetworkInterfacewithIpv6Only{}, nil}, - {"nothing", "eth3", noNetworkInterface{}, nil}, + {"ipv4", "eth3", familyIPv4, validNetworkInterface{}, net.ParseIP("10.254.71.145"), ""}, + {"ipv6", "eth3", familyIPv6, ipv6NetworkInterface{}, net.ParseIP("2001::200"), ""}, + {"no ipv4", "eth3", familyIPv4, ipv6NetworkInterface{}, nil, ""}, + {"no ipv6", "eth3", familyIPv6, validNetworkInterface{}, nil, ""}, + {"I/F down", "eth3", familyIPv4, downNetworkInterface{}, nil, ""}, + {"I/F get fail", "eth3", familyIPv4, noNetworkInterface{}, nil, "no such network interface"}, + {"fail get addr", "eth3", familyIPv4, networkInterfaceFailGetAddrs{}, nil, "unable to 
get Addrs"}, + {"bad addr", "eth3", familyIPv4, networkInterfaceWithInvalidAddr{}, nil, "invalid CIDR"}, } for _, tc := range testCases { - ip, err := getIPFromInterface(tc.nwname, tc.nw) - if !ip.Equal(tc.expected) { + ip, err := getIPFromInterface(tc.nwname, tc.family, tc.nw) + if err != nil { + if !strings.Contains(err.Error(), tc.errStrFrag) { + t.Errorf("case[%s]: Error string %q does not contain %q", tc.tcase, err, tc.errStrFrag) + } + } else if tc.errStrFrag != "" { + t.Errorf("case[%s]: Error %q expected, but not seen", tc.tcase, tc.errStrFrag) + } else if !ip.Equal(tc.expected) { t.Errorf("case[%v]: expected %v, got %+v .err : %v", tc.tcase, tc.expected, ip, err) } } @@ -279,17 +440,14 @@ func TestChooseHostInterfaceFromRoute(t *testing.T) { nw networkInterfacer expected net.IP }{ - {"valid_routefirst", strings.NewReader(gatewayfirst), validNetworkInterface{}, net.ParseIP("10.254.71.145")}, - {"valid_routelast", strings.NewReader(gatewaylast), validNetworkInterface{}, net.ParseIP("10.254.71.145")}, - {"valid_routemiddle", strings.NewReader(gatewaymiddle), validNetworkInterface{}, net.ParseIP("10.254.71.145")}, - {"valid_routemiddle_ipv6", strings.NewReader(gatewaymiddle), validNetworkInterfacewithIpv6Only{}, nil}, - {"no internet connection", strings.NewReader(noInternetConnection), validNetworkInterface{}, nil}, - {"no non-link-local ip", strings.NewReader(gatewayfirstLinkLocal), validNetworkInterfaceWithLinkLocal{}, net.ParseIP("45.55.47.146")}, - {"no route", strings.NewReader(nothing), validNetworkInterface{}, nil}, + {"ipv4", strings.NewReader(gatewayfirst), validNetworkInterface{}, net.ParseIP("10.254.71.145")}, + {"ipv6", strings.NewReader(gatewaymiddle), ipv6NetworkInterface{}, net.ParseIP("2001::200")}, + {"no non-link-local ip", strings.NewReader(gatewaymiddle), networkInterfaceWithOnlyLinkLocals{}, nil}, + {"no routes", strings.NewReader(nothing), validNetworkInterface{}, nil}, {"no route file", nil, validNetworkInterface{}, nil}, {"no 
interfaces", nil, noNetworkInterface{}, nil}, - {"no interface Addrs", strings.NewReader(gatewaymiddle), networkInterfacewithNoAddrs{}, nil}, - {"Invalid Addrs", strings.NewReader(gatewaymiddle), networkInterfacewithIpv6addrs{}, nil}, + {"no interface addrs", strings.NewReader(gatewaymiddle), networkInterfaceWithNoAddrs{}, nil}, + {"fail get addrs", strings.NewReader(gatewaymiddle), networkInterfaceFailGetAddrs{}, nil}, } for _, tc := range testCases { ip, err := chooseHostInterfaceFromRoute(tc.inFile, tc.nw) @@ -298,3 +456,52 @@ func TestChooseHostInterfaceFromRoute(t *testing.T) { } } } +func TestMemberOf(t *testing.T) { + testCases := []struct { + tcase string + ip net.IP + family AddressFamily + expected bool + }{ + {"ipv4 is 4", net.ParseIP("10.20.30.40"), familyIPv4, true}, + {"ipv4 is 6", net.ParseIP("10.10.10.10"), familyIPv6, false}, + {"ipv6 is 4", net.ParseIP("2001::100"), familyIPv4, false}, + {"ipv6 is 6", net.ParseIP("2001::100"), familyIPv6, true}, + } + for _, tc := range testCases { + if memberOf(tc.ip, tc.family) != tc.expected { + t.Errorf("case[%s]: expected %+v", tc.tcase, tc.expected) + } + } +} + +func TestGetIPFromHostInterfaces(t *testing.T) { + testCases := []struct { + tcase string + nw networkInterfacer + expected net.IP + errStrFrag string + }{ + {"fail get I/Fs", failGettingNetworkInterface{}, nil, "failed getting all interfaces"}, + {"no interfaces", noNetworkInterface{}, nil, "no interfaces"}, + {"I/F not up", downNetworkInterface{}, nil, "no acceptable"}, + {"loopback only", loopbackNetworkInterface{}, nil, "no acceptable"}, + {"P2P I/F only", p2pNetworkInterface{}, nil, "no acceptable"}, + {"fail get addrs", networkInterfaceFailGetAddrs{}, nil, "unable to get Addrs"}, + {"no addresses", networkInterfaceWithNoAddrs{}, nil, "no acceptable"}, + {"invalid addr", networkInterfaceWithInvalidAddr{}, nil, "invalid CIDR"}, + {"no matches", networkInterfaceWithOnlyLinkLocals{}, nil, "no acceptable"}, + {"ipv4", validNetworkInterface{}, 
net.ParseIP("10.254.71.145"), ""}, + {"ipv6", ipv6NetworkInterface{}, net.ParseIP("2001::200"), ""}, + } + + for _, tc := range testCases { + ip, err := chooseIPFromHostInterfaces(tc.nw) + if !ip.Equal(tc.expected) { + t.Errorf("case[%s]: expected %+v, got %+v with err : %v", tc.tcase, tc.expected, ip, err) + } + if err != nil && !strings.Contains(err.Error(), tc.errStrFrag) { + t.Errorf("case[%s]: unable to find %q in error string %q", tc.tcase, tc.errStrFrag, err.Error()) + } + } +} diff --git a/staging/src/k8s.io/apimachinery/pkg/util/proxy/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/proxy/BUILD new file mode 100644 index 00000000000..d8c7ef55cbc --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/BUILD @@ -0,0 +1,50 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "dial_test.go", + "transport_test.go", + "upgradeaware_test.go", + ], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/golang.org/x/net/websocket:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/httpstream:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "dial.go", + "doc.go", + "transport.go", + "upgradeaware.go", + ], + tags = ["automanaged"], + deps = [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/mxk/go-flowrate/flowrate:go_default_library", + "//vendor/golang.org/x/net/html:go_default_library", + "//vendor/golang.org/x/net/html/atom:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/httpstream:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/third_party/forked/golang/netutil:go_default_library", + ], +) diff --git a/staging/src/k8s.io/apiserver/pkg/util/proxy/dial.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial.go similarity index 100% rename from staging/src/k8s.io/apiserver/pkg/util/proxy/dial.go rename to staging/src/k8s.io/apimachinery/pkg/util/proxy/dial.go diff --git a/staging/src/k8s.io/apiserver/pkg/util/proxy/dial_test.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial_test.go similarity index 100% rename from staging/src/k8s.io/apiserver/pkg/util/proxy/dial_test.go rename to staging/src/k8s.io/apimachinery/pkg/util/proxy/dial_test.go diff --git a/staging/src/k8s.io/apiserver/pkg/util/proxy/doc.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/doc.go similarity index 90% rename from staging/src/k8s.io/apiserver/pkg/util/proxy/doc.go rename to staging/src/k8s.io/apimachinery/pkg/util/proxy/doc.go index 0b212f3daff..d14ecfad544 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/proxy/doc.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package proxy provides transport and upgrade support for proxies -package proxy // import "k8s.io/apiserver/pkg/util/proxy" +// Package proxy provides transport and upgrade support for proxies. 
+package proxy // import "k8s.io/apimachinery/pkg/util/proxy" diff --git a/staging/src/k8s.io/apiserver/pkg/util/proxy/transport.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/transport.go similarity index 100% rename from staging/src/k8s.io/apiserver/pkg/util/proxy/transport.go rename to staging/src/k8s.io/apimachinery/pkg/util/proxy/transport.go diff --git a/staging/src/k8s.io/apiserver/pkg/util/proxy/transport_test.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/transport_test.go similarity index 100% rename from staging/src/k8s.io/apiserver/pkg/util/proxy/transport_test.go rename to staging/src/k8s.io/apimachinery/pkg/util/proxy/transport_test.go diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/proxy.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go similarity index 85% rename from staging/src/k8s.io/apiserver/pkg/registry/generic/rest/proxy.go rename to staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go index e6ef8cb9249..ff04578e29a 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/proxy.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package rest +package proxy import ( "context" @@ -32,16 +32,13 @@ import ( "k8s.io/apimachinery/pkg/util/httpstream" utilnet "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - genericfeatures "k8s.io/apiserver/pkg/features" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/apiserver/pkg/util/proxy" "github.com/golang/glog" "github.com/mxk/go-flowrate/flowrate" ) -// UpgradeAwareProxyHandler is a handler for proxy requests that may require an upgrade -type UpgradeAwareProxyHandler struct { +// UpgradeAwareHandler is a handler for proxy requests that may require an upgrade +type UpgradeAwareHandler struct { UpgradeRequired bool Location *url.URL // Transport provides an optional round tripper to use to proxy. If nil, the default proxy transport is used @@ -64,10 +61,10 @@ type ErrorResponder interface { Error(err error) } -// NewUpgradeAwareProxyHandler creates a new proxy handler with a default flush interval. Responder is required for returning +// NewUpgradeAwareHandler creates a new proxy handler with a default flush interval. Responder is required for returning // errors to the caller. 
-func NewUpgradeAwareProxyHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired bool, responder ErrorResponder) *UpgradeAwareProxyHandler { - return &UpgradeAwareProxyHandler{ +func NewUpgradeAwareHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired bool, responder ErrorResponder) *UpgradeAwareHandler { + return &UpgradeAwareHandler{ Location: location, Transport: transport, WrapTransport: wrapTransport, @@ -78,7 +75,7 @@ func NewUpgradeAwareProxyHandler(location *url.URL, transport http.RoundTripper, } // ServeHTTP handles the proxy request -func (h *UpgradeAwareProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { +func (h *UpgradeAwareHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { if len(h.Location.Scheme) == 0 { h.Location.Scheme = "http" } @@ -129,7 +126,7 @@ func (h *UpgradeAwareProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Re } // tryUpgrade returns true if the request was handled. -func (h *UpgradeAwareProxyHandler) tryUpgrade(w http.ResponseWriter, req *http.Request) bool { +func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Request) bool { if !httpstream.IsUpgradeRequest(req) { return false } @@ -144,7 +141,7 @@ func (h *UpgradeAwareProxyHandler) tryUpgrade(w http.ResponseWriter, req *http.R // Only append X-Forwarded-For in the upgrade path, since httputil.NewSingleHostReverseProxy // handles this in the non-upgrade path. utilnet.AppendForwardedForHeader(clone) - if h.InterceptRedirects && utilfeature.DefaultFeatureGate.Enabled(genericfeatures.StreamingProxyRedirects) { + if h.InterceptRedirects { backendConn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, h.Location, clone.Header, req.Body, h) } else { clone.URL = h.Location @@ -214,8 +211,8 @@ func (h *UpgradeAwareProxyHandler) tryUpgrade(w http.ResponseWriter, req *http.R } // Dial dials the backend at req.URL and writes req to it. 
-func (h *UpgradeAwareProxyHandler) Dial(req *http.Request) (net.Conn, error) { - conn, err := proxy.DialURL(req.URL, h.Transport) +func (h *UpgradeAwareHandler) Dial(req *http.Request) (net.Conn, error) { + conn, err := DialURL(req.URL, h.Transport) if err != nil { return nil, fmt.Errorf("error dialing backend: %v", err) } @@ -228,9 +225,9 @@ func (h *UpgradeAwareProxyHandler) Dial(req *http.Request) (net.Conn, error) { return conn, err } -var _ utilnet.Dialer = &UpgradeAwareProxyHandler{} +var _ utilnet.Dialer = &UpgradeAwareHandler{} -func (h *UpgradeAwareProxyHandler) defaultProxyTransport(url *url.URL, internalTransport http.RoundTripper) http.RoundTripper { +func (h *UpgradeAwareHandler) defaultProxyTransport(url *url.URL, internalTransport http.RoundTripper) http.RoundTripper { scheme := url.Scheme host := url.Host suffix := h.Location.Path @@ -238,7 +235,7 @@ func (h *UpgradeAwareProxyHandler) defaultProxyTransport(url *url.URL, internalT suffix += "/" } pathPrepend := strings.TrimSuffix(url.Path, suffix) - rewritingTransport := &proxy.Transport{ + rewritingTransport := &Transport{ Scheme: scheme, Host: host, PathPrepend: pathPrepend, diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/proxy_test.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware_test.go similarity index 91% rename from staging/src/k8s.io/apiserver/pkg/registry/generic/rest/proxy_test.go rename to staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware_test.go index 96ebed4d0b7..3e6f11237d4 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/proxy_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware_test.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package rest +package proxy import ( "bytes" @@ -43,9 +43,6 @@ import ( "k8s.io/apimachinery/pkg/util/httpstream" utilnet "k8s.io/apimachinery/pkg/util/net" - "k8s.io/apiserver/pkg/features" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/apiserver/pkg/util/proxy" ) const fakeStatusCode = 567 @@ -248,7 +245,7 @@ func TestServeHTTP(t *testing.T) { responder := &fakeResponder{t: t} backendURL, _ := url.Parse(backendServer.URL) backendURL.Path = test.requestPath - proxyHandler := &UpgradeAwareProxyHandler{ + proxyHandler := &UpgradeAwareHandler{ Location: backendURL, Responder: responder, UpgradeRequired: test.upgradeRequired, @@ -402,8 +399,6 @@ func TestProxyUpgrade(t *testing.T) { }, } - // Enable StreamingProxyRedirects for test. - utilfeature.DefaultFeatureGate.Set(string(features.StreamingProxyRedirects) + "=true") for k, tc := range testcases { for _, redirect := range []bool{false, true} { tcName := k @@ -428,7 +423,7 @@ func TestProxyUpgrade(t *testing.T) { serverURL, _ := url.Parse(backendServer.URL) serverURL.Path = backendPath - proxyHandler := &UpgradeAwareProxyHandler{ + proxyHandler := &UpgradeAwareHandler{ Location: serverURL, Transport: tc.ProxyTransport, InterceptRedirects: redirect, @@ -479,7 +474,7 @@ func TestProxyUpgradeErrorResponse(t *testing.T) { return &fakeConn{err: expectedErr}, nil } responder = &fakeResponder{t: t, w: w} - proxyHandler := &UpgradeAwareProxyHandler{ + proxyHandler := &UpgradeAwareHandler{ Location: &url.URL{ Host: "fake-backend", }, @@ -545,11 +540,11 @@ func TestDefaultProxyTransport(t *testing.T) { for _, test := range tests { locURL, _ := url.Parse(test.location) URL, _ := url.Parse(test.url) - h := UpgradeAwareProxyHandler{ + h := UpgradeAwareHandler{ Location: locURL, } result := h.defaultProxyTransport(URL, nil) - transport := 
result.(*corsRemovingTransport).RoundTripper.(*proxy.Transport) + transport := result.(*corsRemovingTransport).RoundTripper.(*Transport) if transport.Scheme != test.expectedScheme { t.Errorf("%s: unexpected scheme. Actual: %s, Expected: %s", test.name, transport.Scheme, test.expectedScheme) } @@ -721,7 +716,7 @@ func TestProxyRequestContentLengthAndTransferEncoding(t *testing.T) { responder := &fakeResponder{t: t} backendURL, _ := url.Parse(downstreamServer.URL) - proxyHandler := &UpgradeAwareProxyHandler{ + proxyHandler := &UpgradeAwareHandler{ Location: backendURL, Responder: responder, UpgradeRequired: false, @@ -799,28 +794,3 @@ P7y5NeJnE7X6XkyC35zrsJRkz7orE8MCIHdDjsI8pjyNDeGqwUCDWE/a6DrmIDwe emHSqMN2YvChAiEAnxLCM9NWaenOsaIoP+J1rDuvw+4499nJKVqGuVrSCRkCIEqK 4KSchPMc3x8M/uhw9oWTtKFmjA/PPh0FsWCdKrEy -----END RSA PRIVATE KEY-----`) - -// localhostCert was generated from crypto/tls/generate_cert.go with the following command: -// go run generate_cert.go --rsa-bits 512 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h -var localhostCert = []byte(`-----BEGIN CERTIFICATE----- -MIIBjzCCATmgAwIBAgIRAKpi2WmTcFrVjxrl5n5YDUEwDQYJKoZIhvcNAQELBQAw -EjEQMA4GA1UEChMHQWNtZSBDbzAgFw03MDAxMDEwMDAwMDBaGA8yMDg0MDEyOTE2 -MDAwMFowEjEQMA4GA1UEChMHQWNtZSBDbzBcMA0GCSqGSIb3DQEBAQUAA0sAMEgC -QQC9fEbRszP3t14Gr4oahV7zFObBI4TfA5i7YnlMXeLinb7MnvT4bkfOJzE6zktn -59zP7UiHs3l4YOuqrjiwM413AgMBAAGjaDBmMA4GA1UdDwEB/wQEAwICpDATBgNV -HSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MC4GA1UdEQQnMCWCC2V4 -YW1wbGUuY29thwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMA0GCSqGSIb3DQEBCwUA -A0EAUsVE6KMnza/ZbodLlyeMzdo7EM/5nb5ywyOxgIOCf0OOLHsPS9ueGLQX9HEG -//yjTXuhNcUugExIjM/AIwAZPQ== ------END CERTIFICATE-----`) - -// localhostKey is the private key for localhostCert. 
-var localhostKey = []byte(`-----BEGIN RSA PRIVATE KEY----- -MIIBOwIBAAJBAL18RtGzM/e3XgavihqFXvMU5sEjhN8DmLtieUxd4uKdvsye9Phu -R84nMTrOS2fn3M/tSIezeXhg66quOLAzjXcCAwEAAQJBAKcRxH9wuglYLBdI/0OT -BLzfWPZCEw1vZmMR2FF1Fm8nkNOVDPleeVGTWoOEcYYlQbpTmkGSxJ6ya+hqRi6x -goECIQDx3+X49fwpL6B5qpJIJMyZBSCuMhH4B7JevhGGFENi3wIhAMiNJN5Q3UkL -IuSvv03kaPR5XVQ99/UeEetUgGvBcABpAiBJSBzVITIVCGkGc7d+RCf49KTCIklv -bGWObufAR8Ni4QIgWpILjW8dkGg8GOUZ0zaNA6Nvt6TIv2UWGJ4v5PoV98kCIQDx -rIiZs5QbKdycsv9gQJzwQAogC8o04X3Zz3dsoX+h4A== ------END RSA PRIVATE KEY-----`) diff --git a/staging/src/k8s.io/apimachinery/pkg/watch/BUILD b/staging/src/k8s.io/apimachinery/pkg/watch/BUILD index 29ca5c810c0..6e18297cc5c 100644 --- a/staging/src/k8s.io/apimachinery/pkg/watch/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/watch/BUILD @@ -45,3 +45,14 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", ], ) + +go_test( + name = "go_default_test", + srcs = ["until_test.go"], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + ], +) diff --git a/pkg/apimachinery/tests/watch_until_test.go b/staging/src/k8s.io/apimachinery/pkg/watch/until_test.go similarity index 88% rename from pkg/apimachinery/tests/watch_until_test.go rename to staging/src/k8s.io/apimachinery/pkg/watch/until_test.go index 43942af8dc9..fdcb6a1573c 100644 --- a/pkg/apimachinery/tests/watch_until_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/watch/until_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package tests +package watch import ( "errors" @@ -22,15 +22,20 @@ import ( "testing" "time" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" - . 
"k8s.io/apimachinery/pkg/watch" - "k8s.io/kubernetes/pkg/api" ) +type fakePod struct { + name string +} + +func (obj *fakePod) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } + func TestUntil(t *testing.T) { fw := NewFake() go func() { - var obj *api.Pod + var obj *fakePod fw.Add(obj) fw.Modify(obj) }() @@ -50,7 +55,7 @@ func TestUntil(t *testing.T) { if lastEvent.Type != Modified { t.Fatalf("expected MODIFIED event type, got %v", lastEvent.Type) } - if got, isPod := lastEvent.Object.(*api.Pod); !isPod { + if got, isPod := lastEvent.Object.(*fakePod); !isPod { t.Fatalf("expected a pod event, got %#v", got) } } @@ -58,7 +63,7 @@ func TestUntil(t *testing.T) { func TestUntilMultipleConditions(t *testing.T) { fw := NewFake() go func() { - var obj *api.Pod + var obj *fakePod fw.Add(obj) }() conditions := []ConditionFunc{ @@ -77,7 +82,7 @@ func TestUntilMultipleConditions(t *testing.T) { if lastEvent.Type != Added { t.Fatalf("expected MODIFIED event type, got %v", lastEvent.Type) } - if got, isPod := lastEvent.Object.(*api.Pod); !isPod { + if got, isPod := lastEvent.Object.(*fakePod); !isPod { t.Fatalf("expected a pod event, got %#v", got) } } @@ -85,7 +90,7 @@ func TestUntilMultipleConditions(t *testing.T) { func TestUntilMultipleConditionsFail(t *testing.T) { fw := NewFake() go func() { - var obj *api.Pod + var obj *fakePod fw.Add(obj) }() conditions := []ConditionFunc{ @@ -105,7 +110,7 @@ func TestUntilMultipleConditionsFail(t *testing.T) { if lastEvent.Type != Added { t.Fatalf("expected ADDED event type, got %v", lastEvent.Type) } - if got, isPod := lastEvent.Object.(*api.Pod); !isPod { + if got, isPod := lastEvent.Object.(*fakePod); !isPod { t.Fatalf("expected a pod event, got %#v", got) } } @@ -113,7 +118,7 @@ func TestUntilMultipleConditionsFail(t *testing.T) { func TestUntilTimeout(t *testing.T) { fw := NewFake() go func() { - var obj *api.Pod + var obj *fakePod fw.Add(obj) fw.Modify(obj) }() @@ -137,7 +142,7 @@ func TestUntilTimeout(t 
*testing.T) { if lastEvent.Type != Modified { t.Fatalf("expected MODIFIED event type, got %v", lastEvent.Type) } - if got, isPod := lastEvent.Object.(*api.Pod); !isPod { + if got, isPod := lastEvent.Object.(*fakePod); !isPod { t.Fatalf("expected a pod event, got %#v", got) } } @@ -145,7 +150,7 @@ func TestUntilTimeout(t *testing.T) { func TestUntilErrorCondition(t *testing.T) { fw := NewFake() go func() { - var obj *api.Pod + var obj *fakePod fw.Add(obj) }() expected := "something bad" diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index a8ae808dbfd..ba0f5d17c44 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -10,6 +10,10 @@ "ImportPath": "bitbucket.org/ww/goautoneg", "Rev": "75cd24fc2f2c2a2088577d12123ddee5f54e0675" }, + { + "ImportPath": "github.com/NYTimes/gziphandler", + "Rev": "56545f4a5d46df9a6648819d1664c3a03a13ffdb" + }, { "ImportPath": "github.com/PuerkitoBio/purell", "Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4" @@ -300,7 +304,7 @@ }, { "ImportPath": "github.com/davecgh/go-spew/spew", - "Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d" + "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" }, { "ImportPath": "github.com/elazarl/go-bindata-assetfs", @@ -326,10 +330,6 @@ "ImportPath": "github.com/ghodss/yaml", "Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee" }, - { - "ImportPath": "github.com/go-openapi/analysis", - "Rev": "b44dc874b601d9e4e2f6e19140e794ba24bead3b" - }, { "ImportPath": "github.com/go-openapi/jsonpointer", "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" @@ -338,10 +338,6 @@ "ImportPath": "github.com/go-openapi/jsonreference", "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" }, - { - "ImportPath": "github.com/go-openapi/loads", - "Rev": "18441dfa706d924a39a030ee2c3b1d8d81917b38" - }, { "ImportPath": "github.com/go-openapi/spec", "Rev": "6aced65f8501fe1217321abf0749d354824ba2ff" @@ -544,11 +540,11 @@ }, { 
"ImportPath": "github.com/stretchr/testify/assert", - "Rev": "e3a8ff8ce36581f87a15341206f205b1da467059" + "Rev": "f6abca593680b2315d2075e0f5e2a9751e3f431a" }, { "ImportPath": "github.com/stretchr/testify/require", - "Rev": "e3a8ff8ce36581f87a15341206f205b1da467059" + "Rev": "f6abca593680b2315d2075e0f5e2a9751e3f431a" }, { "ImportPath": "github.com/ugorji/go/codec", diff --git a/staging/src/k8s.io/apiserver/OWNERS b/staging/src/k8s.io/apiserver/OWNERS index 5d8ce226d8d..e4525fc6b7f 100644 --- a/staging/src/k8s.io/apiserver/OWNERS +++ b/staging/src/k8s.io/apiserver/OWNERS @@ -13,5 +13,5 @@ reviewers: - liggitt - sttts - ncdc -- timstclair +- tallclair - timothysc diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go index a182441d676..4f0d65e5823 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go @@ -105,6 +105,11 @@ func (l *lifecycle) Admit(a admission.Attributes) error { return nil } + // always allow deletion of other resources + if a.GetOperation() == admission.Delete { + return nil + } + // always allow access review checks. 
Returning status about the namespace would be leaking information if isAccessReview(a) { return nil diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission_test.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission_test.go index 2722003a884..9ddf97c6138 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission_test.go @@ -135,6 +135,24 @@ func TestAdmissionNamespaceDoesNotExist(t *testing.T) { } t.Errorf("expected error returned from admission handler: %v", actions) } + + // verify create operations in the namespace cause an error + err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, nil)) + if err == nil { + t.Errorf("Expected error rejecting creates in a namespace when it is missing") + } + + // verify update operations in the namespace cause an error + err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Update, nil)) + if err == nil { + t.Errorf("Expected error rejecting updates in a namespace when it is missing") + } + + // verify delete operations in the namespace can proceed + err = handler.Admit(admission.NewAttributesRecord(nil, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Delete, nil)) + if err != nil { + t.Errorf("Unexpected error returned from admission handler: %v", err) + } } // TestAdmissionNamespaceActive verifies a resource is admitted when the namespace is active. 
diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/doc.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/doc.go index b2099aab89c..34bc671e8d1 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/doc.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=audit.k8s.io package audit // import "k8s.io/apiserver/pkg/apis/audit" diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/register.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/register.go index 9abf739ae0c..e14b82c1b13 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/register.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/register.go @@ -49,5 +49,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &Policy{}, &PolicyList{}, ) + scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) return nil } diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/types.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/types.go index 0ca857678ce..8e8770d0084 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/types.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/types.go @@ -100,8 +100,8 @@ type Event struct { // +optional ObjectRef *ObjectReference // The response status, populated even when the ResponseObject is not a Status type. - // For successful responses, this will only include the Code and StatusSuccess. - // For non-status type error responses, this will be auto-populated with the error Message. + // For successful responses, this will only include the Code. For non-status type + // error responses, this will be auto-populated with the error Message. 
// +optional ResponseStatus *metav1.Status diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/validation/BUILD b/staging/src/k8s.io/apiserver/pkg/apis/audit/validation/BUILD index 63cbc6bacd8..d6d26735cf6 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/validation/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/validation/BUILD @@ -21,6 +21,7 @@ go_library( srcs = ["validation.go"], tags = ["automanaged"], deps = [ + "//vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", ], diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/validation/validation.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/validation/validation.go index 2ceead05f4b..80b73f851cb 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/validation/validation.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/validation/validation.go @@ -17,6 +17,9 @@ limitations under the License. package validation import ( + "strings" + + "k8s.io/apimachinery/pkg/api/validation" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apiserver/pkg/apis/audit" ) @@ -33,6 +36,8 @@ func ValidatePolicy(policy *audit.Policy) field.ErrorList { func validatePolicyRule(rule audit.PolicyRule, fldPath *field.Path) field.ErrorList { var allErrs field.ErrorList allErrs = append(allErrs, validateLevel(rule.Level, fldPath.Child("level"))...) + allErrs = append(allErrs, validateNonResourceURLs(rule.NonResourceURLs, fldPath.Child("nonResourceURLs"))...) + allErrs = append(allErrs, validateResources(rule.Resources, fldPath.Child("resources"))...) 
if len(rule.NonResourceURLs) > 0 { if len(rule.Resources) > 0 || len(rule.Namespaces) > 0 { @@ -60,3 +65,40 @@ func validateLevel(level audit.Level, fldPath *field.Path) field.ErrorList { return field.ErrorList{field.NotSupported(fldPath, level, validLevels)} } } + +func validateNonResourceURLs(urls []string, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + for i, url := range urls { + if url == "*" { + continue + } + + if !strings.HasPrefix(url, "/") { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), url, "non-resource URL rules must begin with a '/' character")) + } + + if url != "" && strings.ContainsRune(url[:len(url)-1], '*') { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), url, "non-resource URL wildcards '*' must be the final character of the rule")) + } + } + return allErrs +} + +func validateResources(groupResources []audit.GroupResources, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + for _, groupResource := range groupResources { + // The empty string represents the core API group. + if len(groupResource.Group) == 0 { + continue + } + + // Group names must be lower case and be valid DNS subdomains. 
+ // reference: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md + // an error is returned for group name like rbac.authorization.k8s.io/v1beta1 + // rbac.authorization.k8s.io is the valid one + if msgs := validation.NameIsDNSSubdomain(groupResource.Group, false); len(msgs) != 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("group"), groupResource.Group, strings.Join(msgs, ","))) + } + } + return allErrs +} diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/validation/validation_test.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/validation/validation_test.go index 07e354bc7bf..99692157791 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/validation/validation_test.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/validation/validation_test.go @@ -32,7 +32,7 @@ func TestValidatePolicy(t *testing.T) { }, { // Specific request Level: audit.LevelRequestResponse, Verbs: []string{"get"}, - Resources: []audit.GroupResources{{Resources: []string{"secrets"}}}, + Resources: []audit.GroupResources{{Group: "rbac.authorization.k8s.io", Resources: []string{"roles", "rolebindings"}}}, Namespaces: []string{"kube-system"}, }, { // Some non-resource URLs Level: audit.LevelMetadata, @@ -41,6 +41,7 @@ func TestValidatePolicy(t *testing.T) { "/logs*", "/healthz*", "/metrics", + "*", }, }, } @@ -73,6 +74,33 @@ func TestValidatePolicy(t *testing.T) { Level: audit.LevelMetadata, Resources: []audit.GroupResources{{Resources: []string{"secrets"}}}, NonResourceURLs: []string{"/logs*"}, + }, { // invalid group name + Level: audit.LevelMetadata, + Resources: []audit.GroupResources{{Group: "rbac.authorization.k8s.io/v1beta1", Resources: []string{"roles"}}}, + }, { // invalid non-resource URLs + Level: audit.LevelMetadata, + NonResourceURLs: []string{ + "logs", + "/healthz*", + }, + }, { // empty non-resource URLs + Level: audit.LevelMetadata, + NonResourceURLs: []string{ + "", + "/healthz*", + }, + }, { // invalid 
non-resource URLs with multiple "*" + Level: audit.LevelMetadata, + NonResourceURLs: []string{ + "/logs/*/*", + "/metrics", + }, + }, { // invalid non-resource URLs with "*" not at the end + Level: audit.LevelMetadata, + NonResourceURLs: []string{ + "/logs/*.log", + "/metrics", + }, }, } errorCases := []audit.Policy{} diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go index 5ca4ef7ce8d..cd74e2ee51e 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go @@ -27,23 +27,18 @@ import ( reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_audit_Event, InType: reflect.TypeOf(&Event{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_audit_EventList, InType: reflect.TypeOf(&EventList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_audit_GroupResources, InType: reflect.TypeOf(&GroupResources{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_audit_ObjectReference, InType: reflect.TypeOf(&ObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_audit_Policy, InType: reflect.TypeOf(&Policy{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_audit_PolicyList, InType: reflect.TypeOf(&PolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_audit_PolicyRule, InType: reflect.TypeOf(&PolicyRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_audit_UserInfo, InType: reflect.TypeOf(&UserInfo{})}, - ) +// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. 
+func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { + return []conversion.GeneratedDeepCopyFunc{ + {Fn: DeepCopy_audit_Event, InType: reflect.TypeOf(&Event{})}, + {Fn: DeepCopy_audit_EventList, InType: reflect.TypeOf(&EventList{})}, + {Fn: DeepCopy_audit_GroupResources, InType: reflect.TypeOf(&GroupResources{})}, + {Fn: DeepCopy_audit_ObjectReference, InType: reflect.TypeOf(&ObjectReference{})}, + {Fn: DeepCopy_audit_Policy, InType: reflect.TypeOf(&Policy{})}, + {Fn: DeepCopy_audit_PolicyList, InType: reflect.TypeOf(&PolicyList{})}, + {Fn: DeepCopy_audit_PolicyRule, InType: reflect.TypeOf(&PolicyRule{})}, + {Fn: DeepCopy_audit_UserInfo, InType: reflect.TypeOf(&UserInfo{})}, + } } // DeepCopy_audit_Event is an autogenerated deepcopy function. diff --git a/staging/src/k8s.io/apiserver/pkg/audit/request.go b/staging/src/k8s.io/apiserver/pkg/audit/request.go index 4118d468af8..26807485c60 100644 --- a/staging/src/k8s.io/apiserver/pkg/audit/request.go +++ b/staging/src/k8s.io/apiserver/pkg/audit/request.go @@ -50,9 +50,9 @@ func NewEventFromRequest(req *http.Request, level auditinternal.Level, attribs a // prefer the id from the headers. If not available, create a new one. // TODO(audit): do we want to forbid the header for non-front-proxy users? - ids := req.Header[auditinternal.HeaderAuditID] - if len(ids) > 0 { - ev.AuditID = types.UID(ids[0]) + ids := req.Header.Get(auditinternal.HeaderAuditID) + if ids != "" { + ev.AuditID = types.UID(ids) } else { ev.AuditID = types.UID(uuid.NewRandom().String()) } @@ -170,14 +170,16 @@ func LogRequestPatch(ae *audit.Event, patch []byte) { // LogResponseObject fills in the response object into an audit event. The passed runtime.Object // will be converted to the given gv. 
func LogResponseObject(ae *audit.Event, obj runtime.Object, gv schema.GroupVersion, s runtime.NegotiatedSerializer) { - if ae == nil || ae.Level.Less(audit.LevelRequestResponse) { + if ae == nil || ae.Level.Less(audit.LevelMetadata) { return } - if status, ok := obj.(*metav1.Status); ok { ae.ResponseStatus = status } + if ae.Level.Less(audit.LevelRequestResponse) { + return + } // TODO(audit): hook into the serializer to avoid double conversion var err error ae.ResponseObject, err = encodeObject(obj, gv, s) diff --git a/staging/src/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go b/staging/src/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go index e007bf2d57f..4a30bb6359c 100644 --- a/staging/src/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go +++ b/staging/src/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go @@ -33,7 +33,7 @@ const bearerProtocolPrefix = "base64url.bearer.authorization.k8s.io." var protocolHeader = textproto.CanonicalMIMEHeaderKey("Sec-WebSocket-Protocol") -var invalidToken = errors.New("invalid bearer token") +var errInvalidToken = errors.New("invalid bearer token") // ProtocolAuthenticator allows a websocket connection to provide a bearer token as a subprotocol // in the format "base64url.bearer.authorization." 
@@ -102,7 +102,7 @@ func (a *ProtocolAuthenticator) AuthenticateRequest(req *http.Request) (user.Inf // If the token authenticator didn't error, provide a default error if !ok && err == nil { - err = invalidToken + err = errInvalidToken } return user, ok, err diff --git a/staging/src/k8s.io/apiserver/pkg/authentication/request/websocket/protocol_test.go b/staging/src/k8s.io/apiserver/pkg/authentication/request/websocket/protocol_test.go index 2a21aa65d92..62800a40d9d 100644 --- a/staging/src/k8s.io/apiserver/pkg/authentication/request/websocket/protocol_test.go +++ b/staging/src/k8s.io/apiserver/pkg/authentication/request/websocket/protocol_test.go @@ -59,8 +59,8 @@ func TestAuthenticateRequestTokenInvalid(t *testing.T) { if ok || user != nil { t.Errorf("expected not authenticated user") } - if err != invalidToken { - t.Errorf("expected invalidToken error, got %v", err) + if err != errInvalidToken { + t.Errorf("expected errInvalidToken error, got %v", err) } } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go index 2667b3483f2..b17ba47910d 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go @@ -174,30 +174,21 @@ func addGrouplessTypes() { } func addTestTypes() { - type ListOptions struct { - Object runtime.Object - metav1.TypeMeta `json:",inline"` - LabelSelector string `json:"labelSelector,omitempty"` - FieldSelector string `json:"fieldSelector,omitempty"` - Watch bool `json:"watch,omitempty"` - ResourceVersion string `json:"resourceVersion,omitempty"` - TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"` - } scheme.AddKnownTypes(testGroupVersion, &genericapitesting.Simple{}, &genericapitesting.SimpleList{}, &metav1.ExportOptions{}, &metav1.DeleteOptions{}, &genericapitesting.SimpleGetOptions{}, &genericapitesting.SimpleRoot{}, - &SimpleXGSubresource{}) + 
&genericapitesting.SimpleXGSubresource{}) scheme.AddKnownTypes(testGroupVersion, &examplev1.Pod{}) scheme.AddKnownTypes(testInternalGroupVersion, &genericapitesting.Simple{}, &genericapitesting.SimpleList{}, &metav1.ExportOptions{}, &genericapitesting.SimpleGetOptions{}, &genericapitesting.SimpleRoot{}, - &SimpleXGSubresource{}) + &genericapitesting.SimpleXGSubresource{}) scheme.AddKnownTypes(testInternalGroupVersion, &example.Pod{}) // Register SimpleXGSubresource in both testGroupVersion and testGroup2Version, and also their // their corresponding internal versions, to verify that the desired group version object is // served in the tests. - scheme.AddKnownTypes(testGroup2Version, &SimpleXGSubresource{}, &metav1.ExportOptions{}) - scheme.AddKnownTypes(testInternalGroup2Version, &SimpleXGSubresource{}, &metav1.ExportOptions{}) + scheme.AddKnownTypes(testGroup2Version, &genericapitesting.SimpleXGSubresource{}, &metav1.ExportOptions{}) + scheme.AddKnownTypes(testInternalGroup2Version, &genericapitesting.SimpleXGSubresource{}, &metav1.ExportOptions{}) metav1.AddToGroupVersion(scheme, testGroupVersion) } @@ -1153,10 +1144,10 @@ func TestList(t *testing.T) { t.Errorf("%d: %q unexpected resource namespace: %s", i, testCase.url, simpleStorage.actualNamespace) } if simpleStorage.requestedLabelSelector == nil || simpleStorage.requestedLabelSelector.String() != testCase.label { - t.Errorf("%d: unexpected label selector: %v", i, simpleStorage.requestedLabelSelector) + t.Errorf("%d: unexpected label selector: expected=%v got=%v", i, testCase.label, simpleStorage.requestedLabelSelector) } if simpleStorage.requestedFieldSelector == nil || simpleStorage.requestedFieldSelector.String() != testCase.field { - t.Errorf("%d: unexpected field selector: %v", i, simpleStorage.requestedFieldSelector) + t.Errorf("%d: unexpected field selector: expected=%v got=%v", i, testCase.field, simpleStorage.requestedFieldSelector) } } } @@ -3935,23 +3926,12 @@ func TestUpdateChecksAPIVersion(t 
*testing.T) { } } -// SimpleXGSubresource is a cross group subresource, i.e. the subresource does not belong to the -// same group as its parent resource. -type SimpleXGSubresource struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - SubresourceInfo string `json:"subresourceInfo,omitempty"` - Labels map[string]string `json:"labels,omitempty"` -} - -func (obj *SimpleXGSubresource) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } - type SimpleXGSubresourceRESTStorage struct { - item SimpleXGSubresource + item genericapitesting.SimpleXGSubresource } func (storage *SimpleXGSubresourceRESTStorage) New() runtime.Object { - return &SimpleXGSubresource{} + return &genericapitesting.SimpleXGSubresource{} } func (storage *SimpleXGSubresourceRESTStorage) Get(ctx request.Context, id string, options *metav1.GetOptions) (runtime.Object, error) { @@ -3969,7 +3949,7 @@ func TestXGSubresource(t *testing.T) { itemID := "theID" subresourceStorage := &SimpleXGSubresourceRESTStorage{ - item: SimpleXGSubresource{ + item: genericapitesting.SimpleXGSubresource{ SubresourceInfo: "foo", }, } @@ -4018,7 +3998,7 @@ func TestXGSubresource(t *testing.T) { if resp.StatusCode != http.StatusOK { t.Fatalf("unexpected response: %#v", resp) } - var itemOut SimpleXGSubresource + var itemOut genericapitesting.SimpleXGSubresource body, err := extractBody(resp, &itemOut) if err != nil { t.Errorf("unexpected error: %v", err) @@ -4030,7 +4010,7 @@ func TestXGSubresource(t *testing.T) { // conversion type list in API scheme and hence cannot be converted from input type object // to output type object. So it's values don't appear in the decoded output object. 
decoder := json.NewDecoder(strings.NewReader(body)) - var itemFromBody SimpleXGSubresource + var itemFromBody genericapitesting.SimpleXGSubresource err = decoder.Decode(&itemFromBody) if err != nil { t.Errorf("unexpected JSON decoding error: %v", err) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/BUILD index e1f01128710..7a13d57b273 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/BUILD @@ -21,11 +21,18 @@ go_test( library = ":go_default_library", tags = ["automanaged"], deps = [ + "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", + "//vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1:go_default_library", + "//vendor/k8s.io/apiserver/pkg/audit:go_default_library", "//vendor/k8s.io/apiserver/pkg/audit/policy:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", @@ -33,6 +40,7 @@ go_test( "//vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/plugin/pkg/audit/log:go_default_library", + "//vendor/k8s.io/apiserver/plugin/pkg/audit/webhook:go_default_library", ], ) @@ -54,7 +62,10 @@ go_library( 
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/audit_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/audit_test.go index b85f6c8c23f..16cd752c478 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/audit_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/audit_test.go @@ -30,12 +30,21 @@ import ( "testing" "time" + "github.com/pborman/uuid" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" auditinternal "k8s.io/apiserver/pkg/apis/audit" + auditv1alpha1 "k8s.io/apiserver/pkg/apis/audit/v1alpha1" + "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/audit/policy" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/endpoints/request" pluginlog "k8s.io/apiserver/plugin/pkg/audit/log" + // import to call webhook's init() function to register audit.Event to schema + _ "k8s.io/apiserver/plugin/pkg/audit/webhook" ) type fakeAuditSink struct { @@ -177,7 +186,7 @@ func (*fakeHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { w.WriteHeader(200) } -func TestAudit(t *testing.T) { +func TestAuditLegacy(t *testing.T) { writingShortRunningPrefix := func(stage string) string { return fmt.Sprintf(`[\d\:\-\.\+TZ]+ AUDIT: id="[\w-]+" stage="%s" 
ip="127.0.0.1" method="update" user="admin" groups="" as="" asgroups="" namespace="default" uri="/api/v1/namespaces/default/pods/foo"`, stage) } @@ -380,7 +389,7 @@ func TestAudit(t *testing.T) { }, } { var buf bytes.Buffer - backend := pluginlog.NewBackend(&buf) + backend := pluginlog.NewBackend(&buf, pluginlog.FormatLegacy) policyChecker := policy.FakeChecker(auditinternal.LevelRequestResponse) handler := WithAudit(http.HandlerFunc(test.handler), &fakeRequestContextMapper{ user: &user.DefaultInfo{Name: "admin"}, @@ -420,6 +429,483 @@ func TestAudit(t *testing.T) { } } +func TestAuditJson(t *testing.T) { + shortRunningPath := "/api/v1/namespaces/default/pods/foo" + longRunningPath := "/api/v1/namespaces/default/pods?watch=true" + + delay := 500 * time.Millisecond + + for _, test := range []struct { + desc string + path string + verb string + auditID string + handler func(http.ResponseWriter, *http.Request) + expected []auditv1alpha1.Event + }{ + // short running requests with read-only verb + { + "read-only empty", + shortRunningPath, + "GET", + "", + func(http.ResponseWriter, *http.Request) {}, + []auditv1alpha1.Event{ + { + Stage: auditinternal.StageRequestReceived, + Verb: "get", + RequestURI: shortRunningPath, + }, + { + Stage: auditinternal.StageResponseComplete, + Verb: "get", + RequestURI: shortRunningPath, + ResponseStatus: &metav1.Status{Code: 200}, + }, + }, + }, + { + "short running with auditID", + shortRunningPath, + "GET", + uuid.NewRandom().String(), + func(http.ResponseWriter, *http.Request) {}, + []auditv1alpha1.Event{ + { + Stage: auditinternal.StageRequestReceived, + Verb: "get", + RequestURI: shortRunningPath, + }, + { + Stage: auditinternal.StageResponseComplete, + Verb: "get", + RequestURI: shortRunningPath, + ResponseStatus: &metav1.Status{Code: 200}, + }, + }, + }, + { + "read-only panic", + shortRunningPath, + "GET", + "", + func(w http.ResponseWriter, req *http.Request) { + panic("kaboom") + }, + []auditv1alpha1.Event{ + { + Stage: 
auditinternal.StageRequestReceived, + Verb: "get", + RequestURI: shortRunningPath, + }, + { + Stage: auditinternal.StagePanic, + Verb: "get", + RequestURI: shortRunningPath, + ResponseStatus: &metav1.Status{Code: 500}, + }, + }, + }, + // short running request with non-read-only verb + { + "writing empty", + shortRunningPath, + "PUT", + "", + func(http.ResponseWriter, *http.Request) {}, + []auditv1alpha1.Event{ + { + Stage: auditinternal.StageRequestReceived, + Verb: "update", + RequestURI: shortRunningPath, + }, + { + Stage: auditinternal.StageResponseComplete, + Verb: "update", + RequestURI: shortRunningPath, + ResponseStatus: &metav1.Status{Code: 200}, + }, + }, + }, + { + "writing sleep", + shortRunningPath, + "PUT", + "", + func(http.ResponseWriter, *http.Request) { + time.Sleep(delay) + }, + []auditv1alpha1.Event{ + { + Stage: auditinternal.StageRequestReceived, + Verb: "update", + RequestURI: shortRunningPath, + }, + { + Stage: auditinternal.StageResponseComplete, + Verb: "update", + RequestURI: shortRunningPath, + ResponseStatus: &metav1.Status{Code: 200}, + }, + }, + }, + { + "writing 403+write", + shortRunningPath, + "PUT", + "", + func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(403) + w.Write([]byte("foo")) + }, + []auditv1alpha1.Event{ + { + Stage: auditinternal.StageRequestReceived, + Verb: "update", + RequestURI: shortRunningPath, + }, + { + Stage: auditinternal.StageResponseComplete, + Verb: "update", + RequestURI: shortRunningPath, + ResponseStatus: &metav1.Status{Code: 403}, + }, + }, + }, + { + "writing panic", + shortRunningPath, + "PUT", + "", + func(w http.ResponseWriter, req *http.Request) { + panic("kaboom") + }, + []auditv1alpha1.Event{ + { + Stage: auditinternal.StageRequestReceived, + Verb: "update", + RequestURI: shortRunningPath, + }, + { + Stage: auditinternal.StagePanic, + Verb: "update", + RequestURI: shortRunningPath, + ResponseStatus: &metav1.Status{Code: 500}, + }, + }, + }, + { + "writing write+panic", + 
shortRunningPath, + "PUT", + "", + func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("foo")) + panic("kaboom") + }, + []auditv1alpha1.Event{ + { + Stage: auditinternal.StageRequestReceived, + Verb: "update", + RequestURI: shortRunningPath, + }, + { + Stage: auditinternal.StagePanic, + Verb: "update", + RequestURI: shortRunningPath, + ResponseStatus: &metav1.Status{Code: 500}, + }, + }, + }, + // long running requests + { + "empty longrunning", + longRunningPath, + "GET", + "", + func(http.ResponseWriter, *http.Request) {}, + []auditv1alpha1.Event{ + { + Stage: auditinternal.StageRequestReceived, + Verb: "watch", + RequestURI: longRunningPath, + }, + { + Stage: auditinternal.StageResponseStarted, + Verb: "watch", + RequestURI: longRunningPath, + ResponseStatus: &metav1.Status{Code: 200}, + }, + { + Stage: auditinternal.StageResponseComplete, + Verb: "watch", + RequestURI: longRunningPath, + ResponseStatus: &metav1.Status{Code: 200}, + }, + }, + }, + { + "empty longrunning", + longRunningPath, + "GET", + uuid.NewRandom().String(), + func(http.ResponseWriter, *http.Request) {}, + []auditv1alpha1.Event{ + { + Stage: auditinternal.StageRequestReceived, + Verb: "watch", + RequestURI: longRunningPath, + }, + { + Stage: auditinternal.StageResponseStarted, + Verb: "watch", + RequestURI: longRunningPath, + ResponseStatus: &metav1.Status{Code: 200}, + }, + { + Stage: auditinternal.StageResponseComplete, + Verb: "watch", + RequestURI: longRunningPath, + ResponseStatus: &metav1.Status{Code: 200}, + }, + }, + }, + { + "sleep longrunning", + longRunningPath, + "GET", + "", + func(http.ResponseWriter, *http.Request) { + time.Sleep(delay) + }, + []auditv1alpha1.Event{ + { + Stage: auditinternal.StageRequestReceived, + Verb: "watch", + RequestURI: longRunningPath, + }, + { + Stage: auditinternal.StageResponseStarted, + Verb: "watch", + RequestURI: longRunningPath, + ResponseStatus: &metav1.Status{Code: 200}, + }, + { + Stage: auditinternal.StageResponseComplete, + 
Verb: "watch", + RequestURI: longRunningPath, + ResponseStatus: &metav1.Status{Code: 200}, + }, + }, + }, + { + "sleep+403 longrunning", + longRunningPath, + "GET", + "", + func(w http.ResponseWriter, req *http.Request) { + time.Sleep(delay) + w.WriteHeader(403) + }, + []auditv1alpha1.Event{ + { + Stage: auditinternal.StageRequestReceived, + Verb: "watch", + RequestURI: longRunningPath, + }, + { + Stage: auditinternal.StageResponseStarted, + Verb: "watch", + RequestURI: longRunningPath, + ResponseStatus: &metav1.Status{Code: 403}, + }, + { + Stage: auditinternal.StageResponseComplete, + Verb: "watch", + RequestURI: longRunningPath, + ResponseStatus: &metav1.Status{Code: 403}, + }, + }, + }, + { + "write longrunning", + longRunningPath, + "GET", + "", + func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("foo")) + }, + []auditv1alpha1.Event{ + { + Stage: auditinternal.StageRequestReceived, + Verb: "watch", + RequestURI: longRunningPath, + }, + { + Stage: auditinternal.StageResponseStarted, + Verb: "watch", + RequestURI: longRunningPath, + ResponseStatus: &metav1.Status{Code: 200}, + }, + { + Stage: auditinternal.StageResponseComplete, + Verb: "watch", + RequestURI: longRunningPath, + ResponseStatus: &metav1.Status{Code: 200}, + }, + }, + }, + { + "403+write longrunning", + longRunningPath, + "GET", + "", + func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(403) + w.Write([]byte("foo")) + }, + []auditv1alpha1.Event{ + { + Stage: auditinternal.StageRequestReceived, + Verb: "watch", + RequestURI: longRunningPath, + }, + { + Stage: auditinternal.StageResponseStarted, + Verb: "watch", + RequestURI: longRunningPath, + ResponseStatus: &metav1.Status{Code: 403}, + }, + { + Stage: auditinternal.StageResponseComplete, + Verb: "watch", + RequestURI: longRunningPath, + ResponseStatus: &metav1.Status{Code: 403}, + }, + }, + }, + { + "panic longrunning", + longRunningPath, + "GET", + "", + func(w http.ResponseWriter, req *http.Request) { + 
panic("kaboom") + }, + []auditv1alpha1.Event{ + { + Stage: auditinternal.StageRequestReceived, + Verb: "watch", + RequestURI: longRunningPath, + }, + { + Stage: auditinternal.StagePanic, + Verb: "watch", + RequestURI: longRunningPath, + ResponseStatus: &metav1.Status{Code: 500}, + }, + }, + }, + { + "write+panic longrunning", + longRunningPath, + "GET", + "", + func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("foo")) + panic("kaboom") + }, + []auditv1alpha1.Event{ + { + Stage: auditinternal.StageRequestReceived, + Verb: "watch", + RequestURI: longRunningPath, + }, + { + Stage: auditinternal.StageResponseStarted, + Verb: "watch", + RequestURI: longRunningPath, + ResponseStatus: &metav1.Status{Code: 200}, + }, + { + Stage: auditinternal.StagePanic, + Verb: "watch", + RequestURI: longRunningPath, + ResponseStatus: &metav1.Status{Code: 500}, + }, + }, + }, + } { + var buf bytes.Buffer + backend := pluginlog.NewBackend(&buf, pluginlog.FormatJson) + policyChecker := policy.FakeChecker(auditinternal.LevelRequestResponse) + handler := WithAudit(http.HandlerFunc(test.handler), &fakeRequestContextMapper{ + user: &user.DefaultInfo{Name: "admin"}, + }, backend, policyChecker, func(r *http.Request, ri *request.RequestInfo) bool { + // simplified long-running check + return ri.Verb == "watch" + }) + + req, _ := http.NewRequest(test.verb, test.path, nil) + if test.auditID != "" { + req.Header.Add("Audit-ID", test.auditID) + } + req.RemoteAddr = "127.0.0.1" + + func() { + defer func() { + recover() + }() + handler.ServeHTTP(httptest.NewRecorder(), req) + }() + + t.Logf("[%s] audit log: %v", test.desc, buf.String()) + + line := strings.Split(strings.TrimSpace(buf.String()), "\n") + if len(line) != len(test.expected) { + t.Errorf("[%s] Unexpected amount of lines in audit log: %d", test.desc, len(line)) + continue + } + expectedID := types.UID("") + for i, expect := range test.expected { + // decode events back to check json elements. 
+ event := &auditv1alpha1.Event{} + decoder := audit.Codecs.UniversalDecoder(auditv1alpha1.SchemeGroupVersion) + if err := runtime.DecodeInto(decoder, []byte(line[i]), event); err != nil { + t.Errorf("failed decoding line %s: %v", line[i], err) + continue + } + if "admin" != event.User.Username { + t.Errorf("[%s] Unexpected username: %s", test.desc, event.User.Username) + } + if event.Stage != expect.Stage { + t.Errorf("[%s] Unexpected Stage: %s", test.desc, event.Stage) + } + if event.Verb != expect.Verb { + t.Errorf("[%s] Unexpected Verb: %s", test.desc, event.Verb) + } + if event.RequestURI != expect.RequestURI { + t.Errorf("[%s] Unexpected RequestURI: %s", test.desc, event.RequestURI) + } + if test.auditID != "" && event.AuditID != types.UID(test.auditID) { + t.Errorf("[%s] Unexpected AuditID in audit event, AuditID should be the same with Audit-ID http header", test.desc) + } + if expectedID == types.UID("") { + expectedID = event.AuditID + } else if expectedID != event.AuditID { + t.Errorf("[%s] Audits for one request should share the same AuditID, %s differs from %s", test.desc, expectedID, event.AuditID) + } + if (event.ResponseStatus == nil) != (expect.ResponseStatus == nil) { + t.Errorf("[%s] Unexpected ResponseStatus: %v", test.desc, event.ResponseStatus) + continue + } + if (event.ResponseStatus != nil) && (event.ResponseStatus.Code != expect.ResponseStatus.Code) { + t.Errorf("[%s] Unexpected status code : %d", test.desc, event.ResponseStatus.Code) + } + } + } +} + type fakeRequestContextMapper struct { user *user.DefaultInfo } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go index 17bc44f7b76..a53db72137d 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go @@ -17,13 +17,19 @@ limitations under the License. 
package filters import ( + "errors" "net/http" "strings" "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/endpoints/request" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" ) @@ -76,22 +82,25 @@ func WithAuthentication(handler http.Handler, mapper genericapirequest.RequestCo ) } -func Unauthorized(supportsBasicAuth bool) http.HandlerFunc { - if supportsBasicAuth { - return unauthorizedBasicAuth - } - return unauthorized -} +func Unauthorized(requestContextMapper request.RequestContextMapper, s runtime.NegotiatedSerializer, supportsBasicAuth bool) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if supportsBasicAuth { + w.Header().Set("WWW-Authenticate", `Basic realm="kubernetes-master"`) + } + ctx, ok := requestContextMapper.Get(req) + if !ok { + responsewriters.InternalError(w, req, errors.New("no context found for request")) + return + } + requestInfo, found := request.RequestInfoFrom(ctx) + if !found { + responsewriters.InternalError(w, req, errors.New("no RequestInfo found in the context")) + return + } -// unauthorizedBasicAuth serves an unauthorized message to clients. -func unauthorizedBasicAuth(w http.ResponseWriter, req *http.Request) { - w.Header().Set("WWW-Authenticate", `Basic realm="kubernetes-master"`) - http.Error(w, "Unauthorized", http.StatusUnauthorized) -} - -// unauthorized serves an unauthorized message to clients. 
-func unauthorized(w http.ResponseWriter, req *http.Request) { - http.Error(w, "Unauthorized", http.StatusUnauthorized) + gv := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + responsewriters.ErrorNegotiated(ctx, apierrors.NewUnauthorized("Unauthorized"), s, gv, w, req) + }) } // compressUsername maps all possible usernames onto a small set of categories diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authorization.go b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authorization.go index 90599fe3e9a..d244257a04d 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authorization.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authorization.go @@ -22,13 +22,14 @@ import ( "github.com/golang/glog" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" "k8s.io/apiserver/pkg/endpoints/request" ) // WithAuthorizationCheck passes all authorized requests on to handler, and returns a forbidden error otherwise. 
-func WithAuthorization(handler http.Handler, requestContextMapper request.RequestContextMapper, a authorizer.Authorizer) http.Handler { +func WithAuthorization(handler http.Handler, requestContextMapper request.RequestContextMapper, a authorizer.Authorizer, s runtime.NegotiatedSerializer) http.Handler { if a == nil { glog.Warningf("Authorization is disabled") return handler @@ -56,7 +57,7 @@ func WithAuthorization(handler http.Handler, requestContextMapper request.Reques } glog.V(4).Infof("Forbidden: %#v, Reason: %q", req.RequestURI, reason) - responsewriters.Forbidden(attributes, w, req, reason) + responsewriters.Forbidden(ctx, attributes, w, req, reason, s) }) } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go index 891b339ecce..a31bff12591 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go @@ -26,6 +26,7 @@ import ( authenticationv1 "k8s.io/api/authentication/v1" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" @@ -35,7 +36,7 @@ import ( ) // WithImpersonation is a filter that will inspect and check requests that attempt to change the user.Info for their requests -func WithImpersonation(handler http.Handler, requestContextMapper request.RequestContextMapper, a authorizer.Authorizer) http.Handler { +func WithImpersonation(handler http.Handler, requestContextMapper request.RequestContextMapper, a authorizer.Authorizer, s runtime.NegotiatedSerializer) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { impersonationRequests, err := buildImpersonationRequests(req.Header) if err != nil { @@ -104,32 +105,22 @@ func WithImpersonation(handler http.Handler, requestContextMapper 
request.Reques default: glog.V(4).Infof("unknown impersonation request type: %v", impersonationRequest) - responsewriters.Forbidden(actingAsAttributes, w, req, fmt.Sprintf("unknown impersonation request type: %v", impersonationRequest)) + responsewriters.Forbidden(ctx, actingAsAttributes, w, req, fmt.Sprintf("unknown impersonation request type: %v", impersonationRequest), s) return } allowed, reason, err := a.Authorize(actingAsAttributes) if err != nil || !allowed { glog.V(4).Infof("Forbidden: %#v, Reason: %s, Error: %v", req.RequestURI, reason, err) - responsewriters.Forbidden(actingAsAttributes, w, req, reason) + responsewriters.Forbidden(ctx, actingAsAttributes, w, req, reason, s) return } } if !groupsSpecified && username != user.Anonymous { // When impersonating a non-anonymous user, if no groups were specified - // if neither the system:authenticated nor system:unauthenticated groups are explicitly included, // include the system:authenticated group in the impersonated user info - found := false - for _, group := range groups { - if group == user.AllAuthenticated || group == user.AllUnauthenticated { - found = true - break - } - } - if !found { - groups = append(groups, user.AllAuthenticated) - } + groups = append(groups, user.AllAuthenticated) } newUser := &user.DefaultInfo{ diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/impersonation_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/impersonation_test.go index 1c9adb61bc0..d43776507ed 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/impersonation_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/impersonation_test.go @@ -26,6 +26,8 @@ import ( "testing" authenticationapi "k8s.io/api/authentication/v1" + "k8s.io/apimachinery/pkg/runtime" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/endpoints/request" @@ -356,7 +358,7 @@ func 
TestImpersonationFilter(t *testing.T) { delegate.ServeHTTP(w, req) }) - }(WithImpersonation(doNothingHandler, requestContextMapper, impersonateAuthorizer{})) + }(WithImpersonation(doNothingHandler, requestContextMapper, impersonateAuthorizer{}, serializer.NewCodecFactory(runtime.NewScheme()))) handler = request.WithRequestContext(handler, requestContextMapper) server := httptest.NewServer(handler) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD index 15e8cd11873..a7569621219 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD @@ -62,6 +62,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/proxy:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", @@ -73,7 +74,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/httplog:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/proxy:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/trace:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/wsstream:go_default_library", ], diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go index 48c20e5e437..ca585e6ddf4 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -87,8 +87,8 @@ func strategicPatchObject( objToUpdate 
runtime.Object, versionedObj runtime.Object, ) error { - originalObjMap := make(map[string]interface{}) - if err := unstructured.DefaultConverter.ToUnstructured(originalObject, &originalObjMap); err != nil { + originalObjMap, err := unstructured.DefaultConverter.ToUnstructured(originalObject) + if err != nil { return err } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/proxy.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/proxy.go index d856440a836..f8c1a60e94c 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/proxy.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/proxy.go @@ -33,12 +33,12 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/apimachinery/pkg/util/net" + proxyutil "k8s.io/apimachinery/pkg/util/proxy" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" "k8s.io/apiserver/pkg/endpoints/metrics" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/server/httplog" - proxyutil "k8s.io/apiserver/pkg/util/proxy" "github.com/golang/glog" ) @@ -60,10 +60,12 @@ func (r *ProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { var httpCode int reqStart := time.Now() defer func() { - metrics.Monitor(&verb, &apiResource, &subresource, + metrics.Monitor( + verb, apiResource, subresource, net.GetHTTPClient(req), w.Header().Get("Content-Type"), - httpCode, reqStart) + httpCode, reqStart, + ) }() ctx, ok := r.Mapper.Get(req) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/BUILD index f4ccbeab32c..47986c453c2 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/BUILD @@ -19,9 +19,12 @@ go_test( deps = [ "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", ], ) @@ -35,6 +38,7 @@ go_library( ], tags = ["automanaged"], deps = [ + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go index 5c6c9bf7561..b831abdec1d 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go @@ -21,8 +21,12 @@ import ( "net/http" "strings" - "k8s.io/apimachinery/pkg/util/runtime" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/endpoints/request" ) // Avoid emitting errors that look like valid HTML. Quotes are okay. 
@@ -37,17 +41,19 @@ func BadGatewayError(w http.ResponseWriter, req *http.Request) { } // Forbidden renders a simple forbidden error -func Forbidden(attributes authorizer.Attributes, w http.ResponseWriter, req *http.Request, reason string) { +func Forbidden(ctx request.Context, attributes authorizer.Attributes, w http.ResponseWriter, req *http.Request, reason string, s runtime.NegotiatedSerializer) { msg := sanitizer.Replace(forbiddenMessage(attributes)) - w.Header().Set("Content-Type", "text/plain") w.Header().Set("X-Content-Type-Options", "nosniff") - w.WriteHeader(http.StatusForbidden) + var errMsg string if len(reason) == 0 { - fmt.Fprintf(w, "%s", msg) + errMsg = fmt.Sprintf("%s", msg) } else { - fmt.Fprintf(w, "%s: %q", msg, reason) + errMsg = fmt.Sprintf("%s: %q", msg, reason) } + gv := schema.GroupVersion{Group: attributes.GetAPIGroup(), Version: attributes.GetAPIVersion()} + gr := schema.GroupResource{Group: attributes.GetAPIGroup(), Resource: attributes.GetResource()} + ErrorNegotiated(ctx, apierrors.NewForbidden(gr, attributes.GetName(), fmt.Errorf(errMsg)), s, gv, w, req) } func forbiddenMessage(attributes authorizer.Attributes) string { @@ -81,7 +87,7 @@ func InternalError(w http.ResponseWriter, req *http.Request, err error) { w.Header().Set("X-Content-Type-Options", "nosniff") w.WriteHeader(http.StatusInternalServerError) fmt.Fprintf(w, "Internal Server Error: %q: %v", sanitizer.Replace(req.RequestURI), err) - runtime.HandleError(err) + utilruntime.HandleError(err) } // NotFound renders a simple not found error. 
diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors_test.go index 31f45d1e10c..8ea30401e27 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors_test.go @@ -22,8 +22,11 @@ import ( "net/http/httptest" "testing" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/endpoints/request" ) func TestErrors(t *testing.T) { @@ -60,25 +63,32 @@ func TestErrors(t *testing.T) { func TestForbidden(t *testing.T) { u := &user.DefaultInfo{Name: "NAME"} cases := []struct { - expected string - attributes authorizer.Attributes - reason string + expected string + attributes authorizer.Attributes + reason string + contentType string }{ - {`User "NAME" cannot GET path "/whatever".`, - authorizer.AttributesRecord{User: u, Verb: "GET", Path: "/whatever"}, ""}, - {`User "NAME" cannot GET path "/<script>".`, - authorizer.AttributesRecord{User: u, Verb: "GET", Path: "/